#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import ast
import glob
import json
import os
import queue
import random
import shutil
import time
import uuid
import re
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION, ALL_COMPLETED, as_completed
from multiprocessing import cpu_count
from pathlib import Path
from threading import Thread, Lock

from clickhouse import log
from clickhouse.client.clickhouse_client import ClickHouseClient
from clickhouse.common.clickhouse_common import get_agent_id, report_job_details, compute_size, get_dir_size
from clickhouse.common.clickhouse_constants import RestoreProgressPhase, CommandReturnCode, ErrorCode, BackupType, \
    AgentConstant, LogLevel
from clickhouse.common.parse_param import JsonParam
from clickhouse.schemas.clickhouse_schemas import CommonBodyResponse, SubJob
from clickhouse.services.clickhouse_file_service import ClickHouseFileService
from common.common import output_result_file, execute_cmd
from common.common_models import SubJobDetails, LogDetail
from common.const import RestoreType, SubJobStatusEnum, CopyDataTypeEnum
from common.util.backup import query_progress, backup
from common.util.cmd_utils import cmd_format
from common.util.exec_utils import exec_mkdir_cmd

# Matches a "UUID '<uuid>'" clause in ClickHouse DDL; group 1 captures the
# UUID value itself. (Not referenced in this chunk — presumably used by the
# table-creation helpers elsewhere in this module.)
UUID_PATTERN = r'UUID\s+\'([\d\w-]+)\''


class ClickHouseRestoreService:
    """
    恢复任务相关接口，负责恢复任务在与clickHouse主机相关的所有直接操作
    """

    @staticmethod
    def allow_restore_in_local_node(pid, job_id, sub_job_id):
        """
        Check whether the restore is allowed to run on the local node.

        The business logic currently needs no real check; executed by the
        main job only. Validates the request parameters and writes a default
        (success) response.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 1: execute allow_restore_in_local_node,pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def allow_restore_sub_job_in_local_node(pid, job_id, sub_job_id):
        """
        Check whether the restore sub job is allowed to run on the local node.

        The business logic currently needs no real check; executed by every
        sub job. Validates the request parameters and writes a default
        (success) response.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute allow_restore_sub_job_in_local_node,pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def restore_prerequisite(pid, job_id, sub_job_id):
        """
        Restore prerequisite task: nothing to prepare currently; validate the
        request parameters and write a default (success) response.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 2: execute restore_prerequisite, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def restore_gen_sub_job(pid, job_id, sub_job_id):
        """
        Split the restore into per-node sub jobs (executed by the main job).

        Builds one SubJob per target-environment node, carrying the node's
        original agent id in jobInfo so the sub job can locate that node's
        backup data. On any error the result list is cleared so an empty
        result is written.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 3: execute restore_gen_sub_job, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        sub_jobs = []
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            for node in file_content['job']['targetEnv']['nodes']:
                extend_info = node['extendInfo']
                sub_job = SubJob(jobId=job_id, execNodeId=extend_info['agentId'], jobName=node['id'],
                                 jobInfo=f"{extend_info['originalAgentId']}")
                sub_jobs.append(sub_job.dict(by_alias=True))
                log.info(f'step 3: execute restore_gen_sub_job ,response {sub_jobs}')
        except Exception as e:
            log.error(e, exc_info=True)
            sub_jobs.clear()
        output_result_file(pid, sub_jobs)

    @staticmethod
    def restore(pid, job_id, sub_job_id):
        """
        Run the restore sub job on this node (executed on every node).

        Copies the backup files from the repository into a per-copy temporary
        directory on the production disks, reports the copy completion, then
        recreates databases/tables and attaches the data parts. A daemon
        thread reports progress while the copy runs so DME does not flag the
        job as hung.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 4: execute restore, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        response = CommonBodyResponse()
        client = None
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            copy_id = ClickHouseRestoreService.get_copy_id(file_content)
            repositories = file_content['job']['copies'][0]['repositories']
            # jobInfo carries the original agent id set by restore_gen_sub_job.
            original_agent_id = file_content['subJob']['jobInfo']
            exec_node_id = get_agent_id()
            nodes = file_content['job']['targetEnv']['nodes']
            node_ip = None
            for node in nodes:
                if exec_node_id == node['extendInfo']['agentId']:
                    node_ip = node['extendInfo']['ip']
            client = ClickHouseRestoreService.get_clickhouse_client(pid, file_content)
            backup_data_path = ClickHouseRestoreService.get_remote_data_path(repositories, copy_id, original_agent_id)
            log.info(f'step 4: execute restore, pid: {pid}, job_id: {job_id}, backup_data_path: {backup_data_path}')
            default_path = ClickHouseRestoreService.get_default_path(client)
            product_restore_path = default_path + f'restore/{copy_id}/{exec_node_id}'
            data_total_size = ClickHouseRestoreService.get_total_restore_file_size(file_content, backup_data_path[0])
            # Progress heartbeat thread, so DME does not misjudge the job as dead.
            progress_thread = Thread(target=ClickHouseRestoreService.report_restore_progress_thread,
                                     args=(pid, job_id, sub_job_id, file_content, data_total_size))
            # Thread.setDaemon() is deprecated since Python 3.10; assign the attribute.
            progress_thread.daemon = True
            progress_thread.start()
            ClickHouseRestoreService.copy_data_to_product(file_content=file_content, backup_data_path=backup_data_path,
                                                          sub_job_id=sub_job_id,
                                                          product_restore_path=product_restore_path, node_ip=node_ip,
                                                          client=client, copy_id=copy_id, job_id=job_id, pid=pid)
            # File copy finished: report the copied total size.
            log_detail = LogDetail(logInfo="job_log_restore_copy_total_label",
                                   logInfoParam=[node_ip, compute_size(data_total_size)],
                                   logLevel=LogLevel.INFO)
            report_job_details(pid, SubJobDetails(taskId=job_id, progress=80, logDetail=[log_detail],
                                                  subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.RUNNING.value))
            ClickHouseRestoreService.execute_restore(pid=pid, job_id=job_id, sub_job_id=sub_job_id,
                                                     product_restore_path=product_restore_path,
                                                     client=client, copy_id=copy_id, node_ip=node_ip)
            job_detail = SubJobDetails(taskId=job_id, progress=RestoreProgressPhase.COMPLETE.value,
                                       subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.COMPLETED.value)
            report_job_details(pid, job_detail)
        except Exception as e:
            log.error(e, exc_info=True)
            response.code = CommandReturnCode.INTERNAL_ERROR.value
            response.body_err = ErrorCode.ERROR_INTERNAL.value
            job_detail = SubJobDetails(taskId=job_id, progress=RestoreProgressPhase.COMPLETE.value,
                                       subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.FAILED.value)
            report_job_details(pid, job_detail)
        finally:
            output_result_file(pid, response.dict(by_alias=True))
            if client is not None:
                client.remove_kerberos_files()

    @staticmethod
    def restore_post_job(pid, job_id, sub_job_id):
        """
        Post-restore sub job (executed on every node): remove the temporary
        backup files that were copied onto the production disks.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 5: execute restore_post_job, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        response = CommonBodyResponse()
        client = None
        try:
            params = JsonParam.parse_param_with_jsonschema(pid)
            copy_id = ClickHouseRestoreService.get_copy_id(params)
            client = ClickHouseRestoreService.get_clickhouse_client(pid, params)
            # Drop the restore/<copy_id>* temp directories on every disk.
            ClickHouseRestoreService.remove_temp_file(client.query_system_disks(), copy_id)
            report_job_details(pid, SubJobDetails(taskId=job_id, progress=RestoreProgressPhase.COMPLETE.value,
                                                  subTaskId=sub_job_id,
                                                  taskStatus=SubJobStatusEnum.COMPLETED.value))
        except Exception as err:
            log.error(err, exc_info=True)
            response.code = CommandReturnCode.INTERNAL_ERROR.value
            response.body_err = ErrorCode.ERROR_INTERNAL.value
            report_job_details(pid, SubJobDetails(taskId=job_id, progress=RestoreProgressPhase.COMPLETE.value,
                                                  subTaskId=sub_job_id,
                                                  taskStatus=SubJobStatusEnum.FAILED.value))
        finally:
            output_result_file(pid, response.dict(by_alias=True))
            if client is not None:
                client.remove_kerberos_files()

    @staticmethod
    def restore_prerequisite_progress(pid, job_id, sub_job_id):
        """
        Report the restore prerequisite task progress (always complete, since
        the prerequisite is a no-op).

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute restore_prerequisite_progress, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                              progress=RestoreProgressPhase.COMPLETE.value,
                                              taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))

    @staticmethod
    def restore_progress(pid, job_id, sub_job_id):
        """
        Report the restore sub job progress (always complete; real progress is
        pushed by the restore sub job itself via report_job_details).

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute restore_progress, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                              progress=RestoreProgressPhase.COMPLETE.value,
                                              taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))

    @staticmethod
    def restore_post_job_progress(pid, job_id, sub_job_id):
        """
        Report the post-restore sub job progress (always complete).

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute restore_post_job_progress, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                              progress=RestoreProgressPhase.COMPLETE.value,
                                              taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))

    @staticmethod
    def execute_restore(**kwargs):
        """
        Execute the restore flow proper: for every database DDL file found in
        the copied metadata, recreate the database, recreate each of its
        tables, and attach the restored data parts.

        Keyword args:
        @pid: request id
        @job_id: main job id
        @sub_job_id: sub job id
        @product_restore_path: production directory holding the copied backup
            files (its metadata/ and shadow/ trees)
        @client: ClickHouse client
        @copy_id: backup copy id
        @node_ip: ip of this node, used in the reported log details
        :raises Exception: if restoring any table fails (the original error is
            chained as the cause)
        """
        log.info(f'step 4: execute restore, execute_restore start')
        product_restore_path = kwargs.get('product_restore_path')
        client = kwargs.get('client')
        pid = kwargs.get('pid')
        job_id = kwargs.get('job_id')
        copy_id = kwargs.get('copy_id')
        sub_job_id = kwargs.get('sub_job_id')
        disks = client.query_system_disks()
        # Every <db>.sql directly under metadata/ is a CREATE DATABASE statement.
        db_files = glob.glob(f"{product_restore_path}/metadata/*.sql")
        for file in db_files:
            # The database name is the DDL file stem.
            db_name = Path(file).stem
            # Read and execute the CREATE DATABASE statement.
            create_db = ClickHouseFileService.read_file(file)
            client.create_database(create_db)
            # Restore every table from the CREATE TABLE files under metadata/<db>/.
            table_files = glob.glob(f"{product_restore_path}/metadata/{db_name}/*.sql")
            already_restore_num = 0
            table_lengths = len(table_files)
            for i, table_file in enumerate(table_files):
                table_name = Path(table_file).stem
                try:
                    table_data_detached_paths = ClickHouseRestoreService.get_shadow_paths(disks, db_name, table_name,
                                                                                          copy_id)
                    ClickHouseRestoreService.do_create_table(client, db_name, product_restore_path, table_file)
                    # Attach the restored partitions to the recreated table.
                    ClickHouseRestoreService.attach_parts(client, table_data_detached_paths, db_name, disks, table_name)
                    already_restore_num = already_restore_num + 1
                    # Progress grows linearly with restored tables within the
                    # attach-parts phase window, offset by the copy phase.
                    task_progress = int(already_restore_num / table_lengths * RestoreProgressPhase.END_ATTACH_PARTS
                                        + RestoreProgressPhase.END_COPY_DATA_TO_PRODUCT)
                    log_detail = LogDetail(logInfo="job_log_restore_table_success_label",
                                           logInfoParam=[kwargs.get('node_ip'), str(i + 1), db_name, table_name],
                                           logLevel=LogLevel.INFO)
                    report_job_details(pid, SubJobDetails(taskId=job_id, progress=task_progress,
                                                          logDetail=[log_detail], subTaskId=sub_job_id,
                                                          taskStatus=SubJobStatusEnum.RUNNING.value))
                except Exception as e:
                    # Report the failing table to the job log, then abort the restore.
                    log_detail = LogDetail(logInfo="job_log_restore_table_fail_label", logLevel=LogLevel.ERROR,
                                           logInfoParam=[kwargs.get('node_ip'), str(i + 1), db_name, table_name])
                    report_job_details(pid, SubJobDetails(taskId=job_id, progress=0,
                                                          logDetail=[log_detail], subTaskId=sub_job_id,
                                                          taskStatus=SubJobStatusEnum.RUNNING.value))
                    raise Exception(f"restore {db_name}.{table_name} fail ") from e
            log.info(f'job id: {job_id}, sub job id: {sub_job_id}, {db_name}  restore complete')

    @staticmethod
    def move_data_to_detached(table_data_detached_paths, table_info):
        """
        Move the restored partition data into the ``detached`` directory of
        each of the table's data paths so the parts can be attached afterwards.

        :param table_data_detached_paths: mapping of disk name -> shadow path
            on the production host holding the copied backup parts
        :param table_info: table info whose ``data_paths`` and ``disk_names``
            attributes are stringified lists in matching order, e.g.
            data_paths: "['var1/xxx', 'var2/xxx']"
            disk_names: "['disk1', 'disk2']"
        :return: list of the table's populated detached directories
        :raises Exception: if a disk is missing from the mapping or chown fails
        """
        detached_paths = []
        data_paths = ast.literal_eval(table_info.data_paths)
        disk_names = ast.literal_eval(table_info.disk_names)
        # Iterate positionally instead of via list.index(): avoids an O(n)
        # lookup per path and keeps the pairing correct even if a data path
        # occurs twice.
        for idx, data_path in enumerate(data_paths):
            detached_path = os.path.join(data_path, 'detached')
            log.info(f'step 4: execute restore,disk_name: {disk_names[idx]}')
            # dict.get() yields None for an unknown disk; stringified below.
            shadow_disk_path = f'{table_data_detached_paths.get(disk_names[idx])}'
            log.info(f'step 4: execute restore,shadow_disk_path: {shadow_disk_path}')
            if shadow_disk_path == 'None':
                raise Exception("the clickHouse disks has changed！！")
            if not os.path.exists(detached_path):
                exec_mkdir_cmd(detached_path)
                log.info(f'step 4: execute restore, move_data_to_detached mkdir :{detached_path}')
            else:
                # Start from an empty detached directory. rmtree removes the
                # directory itself, so it must be recreated: otherwise the
                # first shutil.move below would RENAME the first part to
                # "detached" instead of placing it inside.
                shutil.rmtree(detached_path)
                exec_mkdir_cmd(detached_path)
                log.info(f'step 4: execute restore, move_data_to_detached rmtree :{detached_path} success')
            for file in os.listdir(shadow_disk_path):
                shutil.move(os.path.join(shadow_disk_path, file), detached_path)
            # Re-own the moved files to the data directory's owner so the
            # ClickHouse server process can read them.
            stat_info = os.stat(data_path)
            cmd = cmd_format("chown -hR {}:{} {}", stat_info.st_uid, stat_info.st_gid, detached_path)
            code, out, err = execute_cmd(cmd)
            log.info(f'backup_table_data, cmd: {cmd}, code: {code}')
            if int(code) != CommandReturnCode.SUCCESS.value:
                log.error(f'execute {cmd} failed, message: {out}, err: {err}')
                raise Exception(str(err))
            detached_paths.append(detached_path)
        return detached_paths

    @staticmethod
    def move_data_to_product(**kwargs):
        """
        Copy one table disk's partition data from the backup repository into
        the temporary restore directory on the matching production disk:
        <disk.path>restore/<copy_id>/<agent_id>/shadow/<db>/<table>/<disk>.

        Keyword args:
        @copy_id: backup copy id
        @disks: ClickHouse system disks (objects exposing ``name``/``path``)
        @db_name: database name
        @table_name: table name
        @table_disk_name: disk-name directory found under the backup table path
        @backup_data_path: list of repository mount paths holding the copy
        Returns: None
        """
        copy_id = kwargs.get('copy_id')
        disks = kwargs.get('disks')
        db_name = kwargs.get('db_name')
        table_name = kwargs.get('table_name')
        table_disk_name = kwargs.get('table_disk_name')
        backup_table_path = kwargs.get('backup_data_path')
        exec_node_id = get_agent_id()
        for disk in disks:
            if disk.name == table_disk_name:
                # NOTE(review): random.choice is used both for listing and for
                # the per-part source path — this assumes every entry of
                # backup_data_path is an equivalent mount of the same copy
                # (choice spreads read load); confirm against the repository
                # layout.
                shadow_disk_path = f"{random.choice(backup_table_path)}/shadow/{db_name}/{table_name}/{disk.name}"
                prepare_detached_path = f'{disk.path}restore/{copy_id}/{exec_node_id}' \
                                        f'/shadow/{db_name}/{table_name}/{disk.name}'
                ClickHouseRestoreService.create_tmp_restore_dir(prepare_detached_path)
                for i in os.listdir(shadow_disk_path):
                    part_path = f"{random.choice(backup_table_path)}/shadow/{db_name}/{table_name}/{disk.name}/{i}"
                    ClickHouseRestoreService.so_copy_data(part_path, prepare_detached_path)
                continue

    @staticmethod
    def so_copy_data(source_path, data_des_path):
        """
        Copy data through the backup .so interface instead of `cp`.

        Starts an asynchronous copy job under a fresh job id and polls its
        progress every two seconds until it either completes or fails.

        :param source_path: source path
        :param data_des_path: destination path
        :raises Exception: if the copy job cannot be started or reports failure
        """
        temp_job_id = str(uuid.uuid4())
        if not backup(temp_job_id, source_path, data_des_path, False):
            raise Exception(f"Failed to start backup, temp_job_id: {temp_job_id}.")
        while True:
            time.sleep(2)
            # Status 1 means completed, 3 means failed; any other status keeps polling.
            status, _, _ = query_progress(temp_job_id)
            if status == 1:
                log.info(f"copy backup data completed, temp_job_id: {temp_job_id}.")
                break
            if status == 3:
                # Log failures at error level so they stand out in the job log.
                log.error(f"copy backup data  failed, jobId: {temp_job_id}.")
                raise Exception(f"copy backup data failed, jobId: {temp_job_id}.")

    @staticmethod
    def create_tmp_restore_dir(tmp_restore_dir):
        """
        Create the temporary restore directory, or empty it if it already exists.

        :param tmp_restore_dir: absolute path of the directory
        """
        if not os.path.exists(tmp_restore_dir):
            exec_mkdir_cmd(tmp_restore_dir)
            log.info(f'create dir {tmp_restore_dir} success')
        else:
            for child_name in os.listdir(tmp_restore_dir):
                child_path = os.path.join(tmp_restore_dir, child_name)
                # shutil.rmtree only accepts real directories; a plain file or
                # symlink child would raise NotADirectoryError, so unlink those.
                if os.path.isdir(child_path) and not os.path.islink(child_path):
                    shutil.rmtree(child_path)
                else:
                    os.remove(child_path)
            log.info(f'clean dir {tmp_restore_dir} success')

    @staticmethod
    def attach_parts(client, table_data_detached_paths, db_name, disks, table_name):
        """
        Move the restored partition data into the table's detached directories
        and attach each partition to the table. No-op when there is no data.

        :param client: ClickHouse client
        :param table_data_detached_paths: mapping of disk name -> shadow path
            holding the copied backup parts (falsy means no data to attach)
        :param db_name: database name
        :param disks: ClickHouse system disks
        :param table_name: table name
        """
        log.info(f"start attach part  ")
        if table_data_detached_paths:
            tables = client.query_allow_backup_tables(db_name, table_name)
            # Resolve the disk name for each of the table's data paths once and
            # stash it on the table info (tables[0].data_paths is a list), so
            # move_data_to_detached needs no second system query.
            disk_names = ClickHouseRestoreService.get_disk_names(disks, tables[0])
            tables[0].disk_names = str(disk_names)
            # Move the partition data into each data path's detached directory.
            detached_paths = ClickHouseRestoreService.move_data_to_detached(table_data_detached_paths,
                                                                            tables[0])
            # Attach every partition found in each detached directory.
            for detached_path in detached_paths:
                for partition in os.listdir(detached_path):
                    log.info(f'step 4: execute restore, attach_part partition :{db_name}.{table_name} {partition}')
                    client.attach_table_part(db_name, table_name, partition)
                    log.info(f'attach_part partition :{db_name}.{table_name} {partition} success')

    @staticmethod
    def get_disk_names(disks, table_info):
        """
        功能描述：根据系统磁盘名和路径名，填充table_info中的diskName
        参数：
        @disks： class Disk name 服务器配置中的磁盘名称 ，path 文件系统中挂载点的路径
        @table_info： 表的detached列表，每个detached目录下包含了分区文件
        返回值：
        @disk_names: 返回路径对应的磁盘列表
        """
        disk_names = []
        # 在表信息中配置磁盘名称
        for disk in disks:
            for i in ast.literal_eval(table_info.data_paths):
                if disk.path in i:
                    disk_names.append(disk.name)
                    continue
        return disk_names

    @staticmethod
    def restore_file_copy(**kwargs):
        """
        Copy the backup files of one table from the file-system repository to
        the production directory. For a fine-grained restore the selected
        database/table DDL and meta files are copied too, so afterwards the
        production directory mirrors the repository tree:
        ├── disk1_path/restore/{copy_id}/{agent_id}
        │   ├── metadata.json
        │   ├── metadata
        │   │   └── dbName.sql
        │   │   └── dbName
        │   │   │     └── tableName.sql
        │   │   │     └── tableName.meta {"backup_data":"0","engine":"xxx"}
        │   ├── shadow
        │   │   └── dbName
        │   │   │    └── tableName
        │   │   │    │     └── diskName
        │   │   │    │     │     └── xxxxxxxxx1 (partition 1)
        │   │   │    │     │     └── xxxxxxxxx2 (partition 2)
        │   │   │    │     │     └── xxxxxxx... (partition ...)
        Keyword args:
        @restore_type: restore type; DDL is only copied for fine-grained restores
        @copy_id: backup copy id
        @db_name: database to restore
        @table_name: table to restore
        @product_restore_path: prepared production path /disk_path/restore/{copy_id}
        @backup_data_path: node backup paths /mount_path/{copy_id}/{original_agent_id}
        @exec_queue: queue used to tell sibling copy workers to terminate on failure
        @client, @pid, @job_id, @sub_job_id, @node_ip: restore context for progress reporting
        :return: CommandReturnCode.SUCCESS.value on success
        :raises Exception: if copying the table data fails
        """
        log.info(f"start copy fine grained restore file")
        restore_type = kwargs.get('restore_type')
        copy_id = kwargs.get('copy_id')
        db_name = kwargs.get('db_name')
        table_name = kwargs.get('table_name')
        product_path = kwargs.get('product_restore_path')
        backup_data_path = kwargs.get('backup_data_path')
        exec_queue = kwargs.get('exec_queue')
        log.info(f"start copy fine grained param is right restore_type :{restore_type}")

        if restore_type == RestoreType.FINE_GRAINED_RESTORE.value:
            fine_grained_db_file = os.path.join(backup_data_path[0], f"metadata/{db_name}.sql")
            fine_grained_table_file = os.path.join(backup_data_path[0], f"metadata/{db_name}/{table_name}.sql")
            target_db_file = os.path.join(product_path, f"metadata/{db_name}.sql")
            target_grained_table_file = os.path.join(product_path, f"metadata/{db_name}/{table_name}.sql")
            backup_table_meta = os.path.join(backup_data_path[0], f"metadata/{db_name}/{table_name}.meta")
            target_table_meta = os.path.join(product_path, f"metadata/{db_name}/{table_name}.meta")
            target_table_path = os.path.join(product_path, f"metadata/{db_name}")
            try:
                # Create the database's temporary metadata directory.
                if not os.path.exists(target_table_path):
                    exec_mkdir_cmd(target_table_path)
            except Exception as e:
                # Concurrent workers may race to create this directory; the
                # collision is deliberately ignored after a 3-second pause.
                log.error(e, exc_info=True)
                time.sleep(3)
            # Copy the CREATE DATABASE / CREATE TABLE statements.
            shutil.copy(fine_grained_table_file, target_grained_table_file)
            shutil.copy(backup_table_meta, target_table_meta)
            if not os.path.exists(target_db_file):
                shutil.copy(fine_grained_db_file, target_db_file)

        # Copy the table data.
        backup_table_path = f"{random.choice(backup_data_path)}/shadow/{db_name}/{table_name}"
        log.info(f"start copy data to product backup_table_path : {backup_table_path} ")
        try:
            if os.path.exists(backup_table_path):
                disks = kwargs.get('client').query_system_disks()
                table_disk_names = os.listdir(backup_table_path)
                for table_disk_name in table_disk_names:
                    ClickHouseRestoreService.move_data_to_product(table_disk_name=table_disk_name,
                                                                  backup_data_path=backup_data_path, copy_id=copy_id,
                                                                  disks=disks, db_name=db_name, table_name=table_name)
                total_size = get_dir_size(backup_table_path)
                log_detail = LogDetail(logInfo="job_log_restore_table_copy_label", logLevel=LogLevel.INFO,
                                       logInfoParam=[kwargs.get('node_ip'), db_name, table_name,
                                                     compute_size(total_size)])
                report_job_details(kwargs.get('pid'),
                                   SubJobDetails(taskId=kwargs.get('job_id'), progress=80, logDetail=[log_detail],
                                                 subTaskId=kwargs.get('sub_job_id'),
                                                 taskStatus=SubJobStatusEnum.RUNNING.value))
            return CommandReturnCode.SUCCESS.value
        except Exception as e:
            # Signal the other copy workers to stop, then abort this one.
            exec_queue.put("Termination")
            log.error(e, exc_info=True)
            raise Exception(f"copy backup data to product: {db_name}.{table_name} fail") from e

    @staticmethod
    def remove_temp_file(disks, copy_id):
        """
        功能描述：清空恢复任务中复制备份文件的临时目录
        参数：
        @disks： clickHouse磁盘地址列表
        @copy_id: 备份文件id，用于查找恢复任务中的唯一目录
        返回值：无
        """
        for disk in disks:
            disk_shadow_path = disk.path + f'/restore'
            if not os.path.exists(disk_shadow_path):
                continue
            shadow_child_names = os.listdir(disk_shadow_path)
            for shadow_child_name in shadow_child_names:
                if shadow_child_name.startswith(copy_id):
                    temp_path = os.path.join(disk_shadow_path, shadow_child_name)
                    shutil.rmtree(temp_path)

    @staticmethod
    def query_local_node(nodes):
        """
        Find the node entry matching the local agent.

        :param nodes: list of node dicts (each carries extendInfo.agentId)
        :return: (index, node) of the first match, or (None, None) if absent
        """
        local_agent_id = get_agent_id()
        matches = ((idx, item) for idx, item in enumerate(nodes)
                   if item['extendInfo']['agentId'] == local_agent_id)
        return next(matches, (None, None))

    @staticmethod
    def copy_data_to_product(**kwargs):
        """
        Copy the backup files from the file system repository to the production
        host. Afterwards the production directory mirrors the repository tree:
        ├── disk1_path/restore/{copy_id}/{agent_id}
        │   ├── metadata.json
        │   ├── metadata
        │   │   └── dbName.sql
        │   │   └── dbName
        │   │   │     └── tableName.sql
        │   ├── shadow
        │   │   └── dbName
        │   │   │    └── tableName
        │   │   │    │     └── diskName
        │   │   │    │     │     └── xxxxxxxxx1 (partition 1)
        │   │   │    │     │     └── xxxxxxxxx2 (partition 2)
        │   │   │    │     │     └── xxxxxxx... (partition ...)
        Keyword args:
        @file_content: parsed main-flow request parameters
        @product_restore_path: production path
        @backup_data_path: backup file paths in the repository
        (remaining context keys are forwarded to multi_thread_copy)
        Returns: None
        """
        log.info(f"start copy data to product ")
        file_content = kwargs.get('file_content')
        backup_data_path = kwargs.get('backup_data_path')
        restore_type = file_content['job']['jobParam']['restoreType']
        product_restore_path = kwargs.get('product_restore_path')
        if not os.path.exists(product_restore_path):
            exec_mkdir_cmd(product_restore_path)
        if restore_type == RestoreType.FINE_GRAINED_RESTORE.value:
            # Fine-grained restore: only the selected tables are copied.
            restore_tables = file_content['job']['restoreSubObjects']
            table_names = []
            for _, restore in enumerate(restore_tables):
                table_names.append(restore['name'])
                # NOTE(review): db_name keeps the last entry's parentName —
                # assumes every restoreSubObject belongs to the same database
                # (and that the list is non-empty); confirm against callers.
                db_name = restore['parentName']
            ClickHouseRestoreService.multi_thread_copy(db_name=db_name, table_names=table_names, **kwargs)
        else:
            # Full restore: copy the whole metadata tree, then every table of
            # every database found in it.
            metadata_path = f"{backup_data_path[0]}/metadata"
            ClickHouseRestoreService.so_copy_data(metadata_path, product_restore_path)
            db_files = glob.glob(f"{backup_data_path[0]}/metadata/*.sql")
            for file in db_files:
                db_name = Path(file).stem
                table_files = glob.glob(f"{backup_data_path[0]}/metadata/{db_name}/*.sql")
                table_names = []
                for _, table_file in enumerate(table_files):
                    table_names.append(Path(table_file).stem)
                ClickHouseRestoreService.multi_thread_copy(db_name=db_name, table_names=table_names, **kwargs)

    @staticmethod
    def multi_thread_copy(**kwargs):
        """
        Copy the backup data of all tables of one database to the production
        host using a thread pool; cancel the remaining tasks on first failure.

        Keyword args:
        @db_name: database name
        @table_names: names of the tables to copy
        @file_content, @backup_data_path, @product_restore_path, @copy_id,
        @client, @pid, @job_id, @sub_job_id, @node_ip: restore context passed
            through to the per-table copy tasks
        :raises Exception: if any table copy did not return SUCCESS
        """
        file_content = kwargs.get('file_content')
        db_name = kwargs.get('db_name')
        table_names = kwargs.get('table_names')
        backup_data_path = kwargs.get('backup_data_path')
        restore_type = file_content.get('job').get('jobParam').get('restoreType')
        product_restore_path = kwargs.get('product_restore_path')
        copy_id = kwargs.get('copy_id')
        client = kwargs.get('client')
        pid = kwargs.get('pid')
        job_id = kwargs.get('job_id')
        sub_job_id = kwargs.get('sub_job_id')
        node_ip = kwargs.get('node_ip')
        # Shared counter (with its lock) throttles the number of simultaneously
        # running copy tasks; exec_queue lets a failed task signal the others.
        running_thread_num = [0]
        running_thread_num_lock = Lock()
        future_list = []
        exec_queue = queue.Queue()
        log_detail = LogDetail(logInfo="job_log_restore_table_table_total_label",
                               logInfoParam=[node_ip, str(len(table_names))], logLevel=LogLevel.INFO)
        report_job_details(pid, SubJobDetails(taskId=job_id, progress=80,
                                              logDetail=[log_detail], subTaskId=sub_job_id,
                                              taskStatus=SubJobStatusEnum.RUNNING.value))
        # max_workers must be a positive int; cpu_count() / 2 is a float and
        # would be 0.5 on a single-core host.
        with ThreadPoolExecutor(max_workers=max(1, cpu_count() // 2)) as pool:
            for _, table_name in enumerate(table_names):
                future_list.append(ClickHouseRestoreService.
                                   restore_data(running_thread_num_lock=running_thread_num_lock,
                                                running_thread_num=running_thread_num, pool=pool,
                                                exec_queue=exec_queue, pid=pid, job_id=job_id,
                                                sub_job_id=sub_job_id, node_ip=node_ip,
                                                db_name=db_name, product_restore_path=product_restore_path,
                                                copy_id=copy_id, table_name=table_name, client=client,
                                                backup_data_path=backup_data_path, restore_type=restore_type))
            # Stop scheduling as soon as one task fails, cancel the rest, then
            # wait for everything to settle before checking the results.
            wait(future_list, return_when=FIRST_EXCEPTION)
            for task in reversed(future_list):
                task.cancel()
            wait(future_list, return_when=ALL_COMPLETED)
        for future in as_completed(future_list):
            if int(future.result()) != CommandReturnCode.SUCCESS.value:
                raise Exception("copy data to product  fail")

    @staticmethod
    def restore_data(**kwargs):
        """
        Wait for a free worker slot, then submit a single table-copy task
        (restore_file_copy) to the shared thread pool.

        kwargs:
        @db_name: database name
        @table_name: table name
        @running_thread_num_lock: lock guarding the shared slot counter
        @running_thread_num: one-element list used as a mutable counter
        @pool: shared ThreadPoolExecutor
        Returns: the submitted Future.
        Raises: Exception when the submission itself fails.
        """
        db_name = kwargs.get('db_name')
        table_name = kwargs.get('table_name')
        counter_lock = kwargs.get('running_thread_num_lock')
        counter = kwargs.get('running_thread_num')
        pool = kwargs.get('pool')
        submitted = None
        while True:
            log.info(f"start copy data to product running_thread_num : {counter[0]} ")
            counter_lock.acquire()
            slot_free = counter[0] < cpu_count() / 2
            if not slot_free:
                # All slots busy: back off and poll again.
                counter_lock.release()
                time.sleep(AgentConstant.SLEEP_TIME)
                continue
            counter[0] += 1
            counter_lock.release()

            def release_slot(_future):
                # Give the slot back once the copy task has finished.
                counter_lock.acquire()
                counter[0] -= 1
                counter_lock.release()

            try:
                submitted = pool.submit(ClickHouseRestoreService.restore_file_copy, **kwargs)
                submitted.add_done_callback(release_slot)
            except Exception as e:
                log.error(e, exc_info=True)
                raise Exception(f"copy backup data to product: {db_name}.{table_name} fail") from e
            break
        return submitted

    @staticmethod
    def get_remote_data_path(repositories, copy_id, original_agent_id):
        """
        Get the file-system paths that hold the backup data for this job.

        @repositories: repository descriptors, each with a "path" list
        @copy_id: backup copy id, identifies the unique directory of this job
        @original_agent_id: id of the agent that produced the backup
        Returns: list of backup data paths on this node.
        Raises: Exception when no repository contains the backup data.
        """
        backup_data_path = []
        for repository in repositories:
            # Probe the first mount point of each repository; if the copy
            # directory exists there, every path of that repository is used.
            if os.path.exists(f'{repository.get("path")[0]}/{copy_id}/{original_agent_id}'):
                for path in repository.get("path"):
                    backup_data_path.append(f'{path}/{copy_id}/{original_agent_id}')
                log.info(f'step 4: execute restore, backup_data_path: {backup_data_path}')
        # Bug fix: the list is initialized to [], so the original
        # "is None" check could never fire; treat empty as "not found".
        if not backup_data_path:
            raise Exception("Failed to get backup_data_path")
        return backup_data_path

    @staticmethod
    def do_create_table(client, db_name, product_restore_path, table_file):
        """
        Restore a table definition: recreate the table when its DDL differs,
        free a conflicting UUID if necessary, and truncate MergeTree-family
        tables whose backup carries data.

        @client: ClickHouse client
        @db_name: database name
        @product_restore_path: restore path on the production side
        @table_file: file containing the CREATE TABLE statement
        Returns: None
        """
        # Read the CREATE TABLE statement saved at backup time.
        create_table_sql = ClickHouseFileService.read_file(table_file)
        table_name = Path(table_file).stem
        tables = client.query_allow_backup_tables(db_name, table_name)
        meta_path = f"{product_restore_path}/metadata/{db_name}/{table_name}.meta"
        log.info(f'step 4: execute restore, do_create_table tables meta_path :{meta_path}')
        table_meta = json.loads(ClickHouseFileService.read_file(meta_path))
        log.info(f'step 4: execute restore, do_create_table tables:{tables}, create_table_sql:{create_table_sql}')
        if tables:
            if tables[0].create_table_query != create_table_sql:
                log.info(f'the create table sql is different：{tables[0].create_table_query != create_table_sql}')
                # Drop the table only when the DDL differs, to keep table ownership
                # intact and avoid destroying an unchanged table if the restore fails.
                client.drop_table(db_name, table_name)
                client.create_table(create_table_sql.replace("`", "\\`"))
                log.info(f'step 4: execute restore, create table success:{table_name}')
        else:
            # Table does not exist yet. Its UUID from the backup may already be
            # occupied by another table; if so, drop that table first.
            # 1. extract the UUID from the CREATE TABLE statement
            result = re.search(UUID_PATTERN, create_table_sql)
            if result:
                table_uuid = result.group(1)
                log.info(f'step 4: execute restore, uuid:{table_uuid}')
                # 2. check whether another table occupies this UUID
                uuid_tables = client.query_tables_by_uuid(table_uuid)
                log.info(f'step 4: execute restore, uuid_tables:{uuid_tables}')
                if uuid_tables:
                    # 3. drop the occupying table
                    client.drop_table(uuid_tables[0].database, uuid_tables[0].name)
                    log.info(
                        f'step 4: execute restore, drop uuid_table no delay SUCCESS:'
                        f'database:{uuid_tables[0].database}, name:{uuid_tables[0].name}')
            # create the table
            client.create_table(create_table_sql.replace("`", "\\`"))
        # Idiomatic membership test instead of str.__contains__.
        if table_meta["backup_data"] == BackupType.TABLE_AND_DATA.value and \
                "MergeTree" in f'{table_meta["engine"]}':
            client.truncate_table_no_engine(db_name, table_name)
        log.info(f'step 4: execute restore, create table success:{table_name}')

    @staticmethod
    def get_shadow_paths(disks, db_name, table_name, copy_id):
        """
        Collect, per disk, the shadow directory that holds the restored data
        of the given table, keeping only directories that actually exist.

        @disks: list of ClickHouse disk descriptors
        @db_name: database name
        @table_name: table name
        @copy_id: backup copy id, identifies the unique directory of this job
        Returns: dict mapping disk name -> existing shadow path.
        """
        node_id = get_agent_id()
        candidates = (
            (disk.name, f'{disk.path}restore/{copy_id}/{node_id}/shadow/{db_name}/{table_name}')
            for disk in disks
        )
        return {name: path for name, path in candidates if os.path.exists(path)}

    @staticmethod
    def get_default_path(client):
        """
        功能描述：获取clickHouse节点的默认路径，没有default盘则返回第一个盘
        参数：
        @client： clickHouse 客户端
        返回值：clickHouse节点的默认路径
        """
        default_path = None
        disks = client.query_system_disks()
        for disk in disks:
            if disk.name == f'default':
                default_path = disk.path
                return default_path
        # 不存在名为default盘，默认取第一个盘
        if default_path is None:
            default_path = disks[0].path
        return default_path

    @staticmethod
    def get_total_restore_file_size(file_content, backup_data_path):
        """
        Compute the total size of the files that will be restored.

        @file_content: restore job parameter json
        @backup_data_path: backup data path
        Returns: total size in bytes.
        """
        restore_type = file_content['job']['jobParam']['restoreType']
        if restore_type != RestoreType.FINE_GRAINED_RESTORE.value:
            # Full restore: everything under the backup directory is copied.
            return get_dir_size(backup_data_path)
        data_total_size = 0
        db_names = set()
        for restore_table in file_content['job']['restoreSubObjects']:
            db_name = restore_table['parentName']
            table_name = restore_table['name']
            db_names.add(db_name)
            table_file = os.path.join(backup_data_path, f"metadata/{db_name}/{table_name}.sql")
            table_meta = os.path.join(backup_data_path, f"metadata/{db_name}/{table_name}.meta")
            data_total_size += os.path.getsize(table_file)
            data_total_size += os.path.getsize(table_meta)
            backup_table_path = f"{backup_data_path}/shadow/{db_name}/{table_name}"
            if os.path.exists(backup_table_path):
                data_total_size += get_dir_size(backup_table_path)
        # Bug fix: the original referenced the loop variable after the loop,
        # which raises NameError for an empty table list and counted only the
        # last table's database DDL; count each distinct database exactly once.
        for db_name in db_names:
            data_total_size += os.path.getsize(os.path.join(backup_data_path, f"metadata/{db_name}.sql"))
        return data_total_size

    @staticmethod
    def report_restore_progress_thread(pid, job_id, sub_job_id, file_content, data_total_size):
        """
        Run in a dedicated thread to keep reporting copy progress, so that a
        very large copy (over 10 minutes) is not mistaken for a dead process
        by DME.

        @pid: pid
        @job_id: job id
        @sub_job_id: sub job id
        @file_content: restore job parameter json
        @data_total_size: total size of the file set being copied
        Returns: None
        """
        client = ClickHouseRestoreService.get_clickhouse_client(pid, file_content)
        disks = client.query_system_disks()
        copy_id = ClickHouseRestoreService.get_copy_id(file_content)
        # Nothing to copy: report this phase as complete once and stop.
        if data_total_size == 0:
            report_job_details(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                                  taskStatus=SubJobStatusEnum.RUNNING,
                                                  progress=100, dataSize=data_total_size))
            return
        copied = 0
        while copied < data_total_size:
            copied = ClickHouseRestoreService.get_save_files_size(disks, copy_id)
            # Progress formula: (copied / total) * 80 — the copy phase ends at 80%.
            phase_progress = copied / data_total_size * RestoreProgressPhase.END_COPY_DATA_TO_PRODUCT
            report_job_details(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                                  taskStatus=SubJobStatusEnum.RUNNING,
                                                  progress=phase_progress, dataSize=data_total_size))
            log.info(f'report restore copy progress, save/total: {copied}/{data_total_size}')
            time.sleep(3)

    @staticmethod
    def get_save_files_size(disks, copy_id):
        """
        Sum the size of the files already restored to the local node.

        @disks: list of ClickHouse disk descriptors
        @copy_id: backup copy id, identifies the unique directory of this job
        Returns: total size in bytes of the restore directories found.
        """
        log.info('start to get save files size')
        exec_node_id = get_agent_id()
        save_size = 0
        for disk in disks:
            file = f'{disk.path}restore/{copy_id}/{exec_node_id}'
            if os.path.exists(file):
                # Scan the directory once instead of twice (the original called
                # get_dir_size again for the log line, doubling the I/O and
                # risking an inconsistent logged value).
                dir_size = get_dir_size(file)
                save_size = save_size + dir_size
                log.info(f'save file: {file}, size: {dir_size}')
        log.info('end to get save files size')
        return save_size

    @staticmethod
    def get_clickhouse_client(pid, file_content):
        """
        Build a ClickHouse client for the node running on this host.

        @pid: pid
        @file_content: restore job parameter json
        Returns: ClickHouse client.
        """
        nodes = file_content['job']['targetEnv']['nodes']
        index, node = ClickHouseRestoreService.query_local_node(nodes)
        # The env prefix encodes the local node's position in the job json.
        env_prefix = f'job_targetEnv_nodes_{index}_auth'
        return ClickHouseClient(pid, node['extendInfo'], env_prefix, node['auth']['extendInfo'])

    @staticmethod
    def get_copy_id(file_content):
        """
        Get the copy id of the restore job.

        For archive copies (S3 / tape) the id sits one level deeper in the
        nested extendInfo structure.

        @file_content: restore job parameter json
        Returns: copy id.
        """
        first_copy = file_content['job']['copies'][0]
        # NOTE(review): the json 'type' is compared to enum members directly;
        # this only matches if CopyDataTypeEnum mixes in str — confirm.
        if first_copy['type'] in (CopyDataTypeEnum.S3_ARCHIVE, CopyDataTypeEnum.TAP_ARCHIVE):
            return first_copy['extendInfo']['extendInfo']['copyId']
        return first_copy['extendInfo']['copyId']
