#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import sqlite3
import threading
import time
from enum import Enum

from dws.commons.log_utils import log
from common.util.cmd_utils import cmd_format
from dws.commons.common import get_cur_host_all_ip, get_subtask_info_from_file, check_subtask_info, \
    dws_exec_cmd_by_type, get_dws_config, copy_file_into_sandbox, open_non_secret, close_non_secret, \
    construct_roach_param, process_table_name_in_table_file
from common.common import check_port_is_used, check_command_injection_exclude_quote
from common.common_models import LogDetail, SubJobDetails
from common.const import SubJobStatusEnum, RestoreType
from dws.backup.cluster_backup import ClusterBackup
from dws.commons.const import IntrusiveMode, DwsSubType, RestoreProgressFileName, DwsDeployType, \
    PERMISSION_640, DwsRoachPort
from dws.commons.function_tool import log_start, retry
from dws.commons.error_code import DwsErrorCode
from dws.commons.job_info import JobInfo
from dws.commons.dws_exception import ErrCodeException
from dws.commons.progress_notify import report_job_details_comm, read_file, get_total_data_size_from_speed_file
from dws.resource.dws_cluster import DwsCluster
from dws.resource.dws_common import DwsCommon
from dws.resource.dws_database import DwsDatabase
from dws.resource.query_interface import QueryRes
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, su_exec_rm_cmd
from common.file_common import change_path_permission, exec_lchown_dir_recursively
from common.util.common_utils import get_group_name_by_os_user

# Marker text in roach output meaning the source database contains no tables.
NO_TABLE_IN_DATABASE_ERR_MSG = 'No table present in current database'


class RoachRestoreJob:
    class RoachRestoreProgress(int, Enum):
        # Progress milestones (percent) for the three restore phases:
        # pre-restore condition check, data transfer, cluster start.
        CHECK_RESTORE_CONDITION_END = 5
        TRANSFER_DATA_END = 95
        START_CLUSTER_END = 100

    def __init__(self, parse_restore_param_obj):
        """Cache the parsed restore parameters and initialize job state.

        :param parse_restore_param_obj: accessor over the restore parameters
        """
        self._parse_restore_param_obj = parse_restore_param_obj
        sub_job = parse_restore_param_obj.get_restore_param().get("subJob")
        if sub_job:
            # NOTE: when "subJob" is absent these two ids remain unset.
            self._main_task_id = sub_job.get("jobId")
            self._sub_task_id = sub_job.get("subJobId")
        self._db_user, self._env_file = parse_restore_param_obj.get_target_cluster_auth_info()
        self._progress = 0
        self._job_status = SubJobStatusEnum.RUNNING
        self._master_port = DwsRoachPort.ROACH_PORT_START
        self._progress_file = ""
        self._update_progress_interval = 10  # seconds between progress reports
        self._error_code = 0
        self._log_detail_param = []
        self._db_name = ''

    def do_restore_job(self):
        """
        Execute the roach restore workflow:
        (1) Pre-restore checks: disk state, previous subtask result; in
            intrusive mode verify every cluster node has an agent; for a
            restore to a new cluster verify the topology file.
        (2) Assemble the restore command.
        (3) Run the restore command (with password-free access enabled).
        (4) Start the cluster.

        :return: True on success; False after reporting FAILED progress.
        """
        # Pre-restore condition checks
        log.info(f"Start to exec restore subtask. main task: {self._main_task_id}, subtask:{self._sub_task_id}")
        dws_cluster = DwsCluster(self._db_user, self._env_file)
        if dws_cluster.is_disk_damaged():
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False
        if not self.check_last_subtask_status():
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False
        if self._parse_restore_param_obj.get_intrusive_mode() == IntrusiveMode.INTRUSIVE_MODE:
            # Intrusive mode requires an agent on every cluster node.
            if not self._check_agents_enough():
                self.update_restore_progress(SubJobStatusEnum.FAILED)
                return False
        ret, restore_to_new = self._parse_restore_param_obj.is_restore_new_cluster()
        if ret and restore_to_new and not self._check_top_file():
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False

        self._progress = RoachRestoreJob.RoachRestoreProgress.CHECK_RESTORE_CONDITION_END
        # Assemble the restore command
        try:
            cmd = self._construct_restore_cmd()
        except Exception as err:
            log.error(f"Construct restore cmd failed for {err}.")
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False
        log.debug(f"Restore cmd is : {cmd}")
        if not cmd:
            log.error(f"Construct restore cmd failed. main task: {self._main_task_id}, subtask:{self._sub_task_id}")
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False
        # Enable password-free (non-secret) access for the DB user
        if not open_non_secret(self._db_user, self._env_file):
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            return False
        # Execute the restore command; on failure clean up and revert access
        if not self._execute_restore(cmd):
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            self._clean_restore_data()
            close_non_secret(self._db_user, self._env_file)
            return False
        self._progress = RoachRestoreJob.RoachRestoreProgress.TRANSFER_DATA_END
        # Start the cluster
        if not self._start_cluster():
            self.update_restore_progress(SubJobStatusEnum.FAILED)
            self._clean_restore_data()
            close_non_secret(self._db_user, self._env_file)
            return False
        self._progress = RoachRestoreJob.RoachRestoreProgress.START_CLUSTER_END
        self._job_status = SubJobStatusEnum.COMPLETED
        self.update_restore_progress(SubJobStatusEnum.COMPLETED)
        self._clean_restore_data()
        close_non_secret(self._db_user, self._env_file)
        log.info(f"Exec restore task {self._sub_task_id} successfully.")
        return True

    @log_start()
    def check_last_subtask_status(self):
        """Check that every required node passed the previous subtask.

        Reads the nodes that completed the preceding subtask from the cache
        area and compares them against the cluster/agent node lists.

        :return: True when all previous subtasks succeeded on every node.
        """
        ret, cluster_nodes, agent_nodes = self._parse_restore_param_obj.get_nodes_info()
        if not ret:
            # Fixed: the log labels were swapped (main task id was logged
            # as "sub task" and vice versa).
            log.error(f"Get cluster and agent nodes failed, main task: {self._main_task_id}, " \
                      f"sub task: {self._sub_task_id}.")
            return False
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        copy_id = self._parse_restore_param_obj.get_copy_id()
        passed_nodes = get_subtask_info_from_file("white_sub_job", os.path.join(cache_path, "tmp", copy_id))
        return check_subtask_info(cluster_nodes, agent_nodes, passed_nodes)

    def update_restore_progress(self, job_status, error_code=0):
        """Persist a progress record reflecting the given job status.

        ``error_code`` is accepted for interface compatibility but is not
        consumed here; error codes are tracked via ``self._error_code``.
        """
        log.info(f"Update task progress. job_status: {job_status}")
        self._job_status = job_status
        # A completed job reports the full copy size; otherwise report 0.
        if job_status == SubJobStatusEnum.COMPLETED:
            data_size = self._parse_restore_param_obj.get_copy_total_size()
        else:
            data_size = 0
        self._write_progress(self._construct_progress_st(data_size))

    def get_progress_file(self):
        """Return the restore-progress file path, or "" without a cache path."""
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        if cache_path == "":
            log.error(
                f"No cache path can be read or written.cache_path:{cache_path},main task:{self._main_task_id}")
            return ""
        file_name = f"{RestoreProgressFileName.RESTORE_PROGRESS}{self._parse_restore_param_obj.get_copy_id()}"
        return os.path.join(cache_path, "tmp", file_name)

    def get_progress(self):
        """Return the current restore progress (percent)."""
        return self._progress

    def restore_can_run_in_local_node(self):
        """Whether the restore may run on this node.

        A restore to the original cluster must run on the node that made
        the backup; a restore to a new cluster may run anywhere.
        """
        ret, restore_to_new = self._parse_restore_param_obj.is_restore_new_cluster()
        if not ret or restore_to_new:
            return True
        log.debug(f"Restore to old cluster.")
        backup_ip = self._parse_restore_param_obj.get_backup_host_ip()
        if backup_ip not in get_cur_host_all_ip():
            log.warn(f"This backup is not executed in this host.")
            return False
        return True


    def fine_grained_restore_prepare(self, old_table_file, old_table_list):
        """Write the old-table list file and copy it into the user sandbox."""
        prepared = self._prepare_table_file(old_table_file, old_table_list)
        if not prepared:
            return False
        sandbox_dir = os.path.join('/home', self._db_user)
        if not copy_file_into_sandbox(self._db_user, old_table_file, sandbox_dir):
            return False
        return True

    @log_start()
    def _check_schema_res_in_database(self, db_name, schema_name):
        """Verify the target schema exists in the production database.

        Used when restoring a database copy at schema granularity; see
        error code NO_RESTORE_TARGET_SCHEMA_IN_DATABASE.
        """
        if not (db_name and schema_name):
            log.error(f"Param err: [{db_name}], [{schema_name}]")
            return False
        all_schemas = QueryRes(self._db_user, self._env_file).get_all_schemas(db_name)
        if schema_name in all_schemas:
            return True
        log.error(f"No schema[{schema_name}] res in db[{db_name}]")
        return False

    @log_start()
    def _check_table_in_copy(self, db_name, target_table_list):
        """Filter the target tables down to those present in the copy.

        Used for normal table-level restore: queries the copy's sqlite
        metadata and reports (via job detail) every table that is missing.

        :param db_name: database the tables belong to
        :param target_table_list: "schema.table" names requested for restore
        :return: subset of ``target_table_list`` found in the copy; empty
                 list on error or when nothing matches
        """
        copy_type = self._parse_restore_param_obj.get_copy_type()
        copy_meta_path, cache_path = self._parse_restore_param_obj.get_available_path()
        if copy_type == "s3Archive":
            # Archived copies keep their metadata under the cache area.
            copy_meta_path = cache_path
        copy_meta_file = os.path.join(copy_meta_path, 'meta', self._parse_restore_param_obj.get_source_backup_copy_id(),
                                      "sqlite", "copymetadata.sqlite")
        if not os.path.exists(copy_meta_file):
            log.error(f"Copy meta file {copy_meta_file} not exist.")
            return []

        try:
            object_conn = sqlite3.connect(copy_meta_file)
        except Exception as err:
            log.error(f"Fail to connect {copy_meta_file} for {err}")
            return []
        not_exist_table = []
        exist_table = []
        try:
            object_cur = object_conn.cursor()
            for single_table in target_table_list:
                if len(single_table.split(".")) != 2:
                    log.warning(f"Table {single_table} is illegal")
                    not_exist_table.append(f"{db_name}.{single_table}")
                    continue
                schema_name, table_name = single_table.split(".")
                # Parameterized query: names may contain characters that
                # would otherwise break or inject into the SQL statement.
                query_cmd = "select * from T_COPY_METADATA where name=? " \
                            "and type='table' and parent_path like ?"
                object_tables = object_cur.execute(
                    query_cmd, (table_name, f"%{db_name}/{schema_name}%")).fetchall()
                if not object_tables:
                    not_exist_table.append(f"{db_name}.{single_table}")
                    log.warning(f"Table {db_name}.{single_table} is not in copy.")
                    continue
                exist_table.append(single_table)
        finally:
            # The original code leaked the connection; always close it.
            object_conn.close()

        if not_exist_table:
            log_detail = LogDetail(logInfo="dws_restore_table_not_exist_label",
                                   logInfoParam=[','.join(not_exist_table)],
                                   logTimestamp=int(time.time()), logLevel=2)
            report_job_details_comm(self._sub_task_id,
                                    SubJobDetails(taskId=self._main_task_id, subTaskId=self._sub_task_id,
                                                  taskStatus=self._job_status.value, progress=self._progress,
                                                  dataSize=0, logDetail=[log_detail]).dict(by_alias=True))
        return exist_table

    @log_start()
    @retry(exception_to_check=Exception)
    def _exec_start_cluster_cmd(self, start_cmd):
        """Run the cluster start command and verify the cluster is Normal.

        :param start_cmd: cluster start command line
        :return: True when the cluster reaches the "Normal" state
        :raises Exception: command output, when the state is not Normal
                 (which triggers the @retry decorator)
        """
        _, cmd_output = dws_exec_cmd_by_type(self._db_user, self._env_file, start_cmd)
        cluster = DwsCluster(self._db_user, self._env_file)
        if cluster.get_cluster_state() != "Normal":
            raise Exception(f"{cmd_output}")
        return True

    def _check_top_file(self):
        """Validate the install-config XML is readable on every node.

        Runs `cat` on the file through gs_ssh; any [FAILURE] marker in the
        output fails the check and sets CHECK_TOP_FILE_FAIL.
        """
        check_cmd = f"gs_ssh -c \\\"cat /opt/huawei/mppdb-install-config.xml\\\""
        log.info(f"Check top file cmd: {check_cmd}")
        return_code, output = dws_exec_cmd_by_type(self._db_user, self._env_file, check_cmd)
        if return_code and "[FAILURE]" not in output:
            return True
        log.error("Top file is illegal.")
        self._error_code = DwsErrorCode.CHECK_TOP_FILE_FAIL
        return False

    def _update_progress_periodically(self):
        """
        Periodically compute and persist restore progress until the job ends.

        Progress has three phases:
        (1) pre-restore check finished: 5%
        (2) restore command running: 5%~95%, computed as
            5% + (transferred data ratio) * 90%
        (3) cluster started: 100%
        """
        while True:
            # Task already finished: stop the reporting loop.
            if self._job_status == SubJobStatusEnum.COMPLETED:
                log.info(f"Exit progress thread, restore task is completed,"
                         f" main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                break
            if self._job_status != SubJobStatusEnum.RUNNING:
                log.info(f"Exit progress thread. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return self._write_progress(self._construct_progress_st())
            # Restore subtask has not started yet.
            if self._progress < RoachRestoreJob.RoachRestoreProgress.CHECK_RESTORE_CONDITION_END:
                self._write_progress(self._construct_progress_st())
                time.sleep(self._update_progress_interval)
                continue
            # Restore subtask is running.
            copy_total_size = self._parse_restore_param_obj.get_copy_total_size()
            if not copy_total_size or copy_total_size <= 0:
                log.warn(F"Fail to get copy total size for {self._main_task_id}.")
                self._write_progress(self._construct_progress_st())
                time.sleep(self._update_progress_interval)
                continue
            if self._progress >= RoachRestoreJob.RoachRestoreProgress.TRANSFER_DATA_END:
                self._write_progress(self._construct_progress_st(copy_total_size))
                time.sleep(self._update_progress_interval)
                continue
            _, cache_path = self._parse_restore_param_obj.get_available_path()
            copy_id = self._parse_restore_param_obj.get_copy_id()
            already_restore_size = get_total_data_size_from_speed_file(cache_path, copy_id, self._main_task_id)
            if already_restore_size >= copy_total_size:
                self._progress = RoachRestoreJob.RoachRestoreProgress.TRANSFER_DATA_END
            elif copy_total_size > 0:
                # 5% base for the finished check phase plus a 90%-weighted
                # share of the data already transferred.
                self._progress = (already_restore_size / copy_total_size) * 100 * 0.9 + \
                                 RoachRestoreJob.RoachRestoreProgress.CHECK_RESTORE_CONDITION_END
            self._write_progress(self._construct_progress_st(already_restore_size))
            time.sleep(self._update_progress_interval)

    def _write_progress(self, progress_data):
        """Write the progress structure to the restore progress file.

        Lazily resolves the progress file path (and creates its directory)
        on first use.

        :param progress_data: progress dict to persist (overwrites the file)
        """
        if not self._progress_file:
            _, cache_path = self._parse_restore_param_obj.get_available_path()
            if cache_path == "":
                log.error(
                    f"No cache path can be read or written.cache_path:{cache_path},main task:{self._main_task_id}")
                return
            self._progress_file = os.path.join(cache_path, "tmp",
                                               f"{RestoreProgressFileName.RESTORE_PROGRESS}"
                                               f"{self._parse_restore_param_obj.get_copy_id()}")
            if not os.path.exists(os.path.join(cache_path, "tmp")):
                exec_mkdir_cmd(os.path.join(cache_path, "tmp"))
        if not exec_overwrite_file(self._progress_file, progress_data):
            log.error(f"Update progress failed. main task:{self._main_task_id}")
        else:
            # Fixed: the path was previously logged as the literal text
            # "self._progress_file" (missing f-string braces).
            log.debug(f"Write restore progress({progress_data}) to {self._progress_file} successfully.")

    def _construct_progress_st(self, restored_data_size=0):
        """Build the SubJobDetails dict for the current job status.

        :param restored_data_size: data restored so far, in MB
        :return: SubJobDetails serialized with field aliases
        """
        hostname = DwsCommon(self._db_user, self._env_file).get_hostname()
        total_size = self._parse_restore_param_obj.get_copy_total_size()
        temp_logdetail = None if self._error_code == 0 else self._error_code
        status = self._job_status
        log_detail = None
        if status == SubJobStatusEnum.FAILED:
            log_detail = LogDetail(logInfo="plugin_restore_subjob_fail_label", logInfoParam=[f"{self._sub_task_id}"],
                                   logTimestamp=int(time.time()), logLevel=3,
                                   logDetail=temp_logdetail,
                                   logDetailParam=self._log_detail_param)
        elif status == SubJobStatusEnum.RUNNING:
            log_detail = LogDetail(logInfo="dws_plugin_restore_subjob_running_label",
                                   logInfoParam=[hostname, self._sub_task_id, f"{total_size} MB",
                                                 f"{restored_data_size} MB"],
                                   logTimestamp=int(time.time()), logLevel=1)
        elif status == SubJobStatusEnum.COMPLETED:
            log_detail = LogDetail(logInfo="dws_plugin_restore_subjob_success_label",
                                   logInfoParam=[hostname, self._sub_task_id, f"{total_size} MB"],
                                   logTimestamp=int(time.time()), logLevel=1)
        details = SubJobDetails(taskId=self._main_task_id, subTaskId=self._sub_task_id,
                                taskStatus=status.value, progress=self._progress,
                                dataSize=restored_data_size * 1024, logDetail=[log_detail])
        return details.dict(by_alias=True)

    def _check_roach_client(self):
        """Roach-client check placeholder; currently always succeeds."""
        log.info(f"Check roach client successfully. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
        return True

    def _select_available_port(self, start_port, end_port):
        """Return the first free port in [start_port, end_port), or 0 if none."""
        candidate = start_port
        while candidate < end_port:
            if not check_port_is_used(candidate):
                return candidate
            log.debug(f"Port {candidate} is used. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            candidate += 1
        return 0

    def _construct_cmd_common_part(self):
        """
        Build the GaussRoach.py restore options shared by all targets.

        --master-port: a port that is not currently in use
        --metadata-destination: taken from the copy metadata
        --backup-key: taken from the copy data
        --parallel-process: derived from the node's DN count (DN count * 2)
        When restoring to a new cluster the following are appended:
        --restore-new-cluster --restore-configs --old-cluster-hostname

        :return: the command string, or "" on any failure.
        """
        self._master_port = self._select_available_port(DwsRoachPort.ROACH_PORT_START, DwsRoachPort.ROACH_PORT_END)
        if self._master_port <= 0:
            log.error(f"No available master port. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        backup_key = self._parse_restore_param_obj.get_backup_key()
        if not backup_key:
            log.error(f"No backup key. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        python_version = self._get_python_version()
        log.debug(f"Dws cluster version: {python_version}")
        roach_param = construct_roach_param()
        roach_cmd = f"{python_version} $GPHOME/script/GaussRoach.py -t restore --clean " \
                    f"--master-port {self._master_port} --media-destination nbu_policy --media-type NBU " \
                    f" --backup-key {backup_key} {roach_param} "

        # Decide whether the restore targets a new cluster.
        ret, restore_to_new = self._parse_restore_param_obj.is_restore_new_cluster()
        if not ret:
            return ""
        if restore_to_new:
            old_host_name = self._parse_restore_param_obj.get_backup_hostname()
            if not old_host_name:
                log.error(f"Fail to get old host name. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return ""
            roach_cmd += f" --restore-new-cluster --restore-configs --old-cluster-hostname {old_host_name}"
            metadata_des = f"/home/{self._db_user}/backup/{self._main_task_id}"
            roach_cmd += f" --metadata-destination {metadata_des}"
        else:
            metadata_des = self._parse_restore_param_obj.get_metadata_destination()
            if not metadata_des:
                log.error(f"No metadataPath. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return ""
            metadata_des = os.path.join(metadata_des, self._main_task_id)
            roach_cmd += f" --metadata-destination {metadata_des}"
        log.debug(f"Execute roach command {roach_cmd}")
        return roach_cmd

    def _construct_database_part(self):
        """Assemble the database-level restore options.

        Selects an agent port, validates the target database name and
        creates the database when it does not exist.

        :return: option string, or "" on failure (error code may be set).
        """
        agent_port = self._select_available_port(self._master_port + 1, DwsRoachPort.ROACH_PORT_END)
        if agent_port <= 0:
            log.error(f"Fail to select agent port. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        # Name of the database to restore
        db_name = self._parse_restore_param_obj.get_restore_object_name()
        if not db_name:
            log.error(f"Fail to get db name. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        if check_command_injection_exclude_quote(db_name):
            # Fixed typo in log message: "invaild" -> "invalid".
            log.error(f"DB name {db_name} is invalid. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        self._db_name = db_name

        roach_cmd = f" --create --agent-port {agent_port} --dbname {db_name}"
        # Create the database if it does not already exist.
        if not self._create_not_exist_database(db_name):
            self._error_code = DwsErrorCode.NO_RESTORE_TARGET_DATABASE
            return ""
        return roach_cmd

    def _construct_table_part(self):
        """
        Assemble the table-level restore options, covering both normal
        restore and fine-grained restore.

        :return: option string, or "" on failure (error code may be set).
        """
        # Resolve the target database name and table lists.
        db_name = ""
        target_table_list = []
        old_table_list = []
        restore_type = self._parse_restore_param_obj.get_restore_type()
        if restore_type == RestoreType.INVALID_RESTORE_TYPE:
            return ""
        if restore_type == RestoreType.NORMAL_RESTORE:
            db_name, target_table_list = self._parse_restore_param_obj.get_normal_restore_tables()
            target_table_list = self._check_table_in_copy(db_name, target_table_list)
            if not target_table_list:
                self._error_code = DwsErrorCode.NO_TABLE_IN_COPY
                return ""
        elif restore_type == RestoreType.FINE_GRAINED_RESTORE:
            db_name, target_table_list, old_table_list = \
                self._parse_restore_param_obj.get_fine_grained_restore_tables()
        log.debug(f"Database {db_name}, table list: {target_table_list}, old_table_list:{old_table_list}")
        if not db_name or not target_table_list:
            log.error(f"Fail to get table list. main task:{self._main_task_id}")
            return ""
        # Reject duplicate table names.
        if len(target_table_list) > len(set(target_table_list)):
            log.error(f"There exists duplicate table. main task: {self._main_task_id}")
            return ""
        self._db_name = db_name
        # Create the owning database when it does not exist.
        if not self._create_not_exist_database(db_name):
            self._error_code = DwsErrorCode.NO_RESTORE_SCHEMA_DATABASE
            self._log_detail_param = [f"{db_name}"]
            return ""
        # Fail when the owning schema is missing.
        if not self._is_schema_exist(db_name, target_table_list):
            return ""
        # Write the table names into temporary files under the user home.
        # NOTE(review): metadata_des is validated below but never used
        # afterwards — confirm whether this check is still needed.
        metadata_des = self._parse_restore_param_obj.get_metadata_destination()
        if not metadata_des:
            log.error(f"Fail to get metadata destination. main task:{self._main_task_id}")
            return ""
        copy_id = self._parse_restore_param_obj.get_copy_id()
        if not copy_id:
            log.error(f"Fail to get copy id. main task:{self._main_task_id}")
            return ""
        target_table_file = os.path.join("/home", self._db_user, f"target_table_list{copy_id}")
        old_table_file = os.path.join("/home", self._db_user, f"old_table_list{copy_id}")
        log.debug(f"Table list file {target_table_file}, old table list: {old_table_list}")
        processed_target_table_list = [process_table_name_in_table_file(table) for table in target_table_list]
        if not self._prepare_table_file(target_table_file, processed_target_table_list):
            return ""
        if not copy_file_into_sandbox(self._db_user, target_table_file, os.path.join('/home', self._db_user)):
            return ""
        # Fine-grained restore also needs the old-table list file.
        if restore_type == RestoreType.FINE_GRAINED_RESTORE:
            processed_old_table_list = [process_table_name_in_table_file(table) for table in old_table_list]
            if not self.fine_grained_restore_prepare(old_table_file, processed_old_table_list):
                return ""
        # Assemble the final option string.
        return self._construct_table_cmd(target_table_file, db_name, len(target_table_list), old_table_file)

    def _construct_table_cmd(self, target_table_file, database_name, table_list_size, old_table_file):
        """Assemble the roach option string for a table-level restore.

        Normal restore uses the target table list directly; fine-grained
        restore chooses options by copy subtype. Physical and logical
        copies take different parameters, and logical copies cannot be
        restored to a new table.
        """
        restore_type = self._parse_restore_param_obj.get_restore_type()
        # Normal restore
        if restore_type == RestoreType.NORMAL_RESTORE:
            agent_port = self._select_available_port(self._master_port + 1, DwsRoachPort.ROACH_PORT_END)
            if agent_port <= 0:
                log.error(f"Fail to select agent port. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return ""
            base = f" --create --dbname {database_name} --table-list {target_table_file} --agent-port {agent_port}"
            # More than one table requires the --logical flag.
            return f"{base} --logical" if table_list_size > 1 else base
        # Fine-grained restore
        copy_sub_type = self._parse_restore_param_obj.get_copy_sub_type()
        if not copy_sub_type:
            log.error(f"Fail to get copy subtype. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        if copy_sub_type in (DwsSubType.DWS_TABLE, DwsSubType.DWS_DATABASE):
            agent_port = self._select_available_port(self._master_port + 1, DwsRoachPort.ROACH_PORT_END)
            if agent_port <= 0:
                log.error(f"Fail to select agent port. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return ""
            return f" --create --dbname {database_name} --table-list {old_table_file}  --agent-port {agent_port} " \
                   f"--logical"
        if copy_sub_type in (DwsSubType.DWS_CLUSTER, DwsSubType.DWS_SCHEMA):
            return f" --dbname {database_name} --table-list {old_table_file} " \
                   f"--physical-fine-grained --restore-target-list {target_table_file}"
        return ""

    def _prepare_table_file(self, file_name, table_list):
        """Write ``table_list`` to ``file_name`` and hand it to the DB user.

        Prepares the --table-list (or --nbu-media-list) file with 640
        permissions, owned by the database OS user.
        """
        self._write_data_into_file(file_name, table_list)
        group_name = get_group_name_by_os_user(self._db_user)
        if not exec_lchown_dir_recursively(file_name, self._db_user, group_name):
            log.error(f"Change {file_name} to {self._db_user} failed. main task:{self._main_task_id}")
            return False
        change_path_permission(file_name, mode=PERMISSION_640)
        return True

    def _construct_schema_part(self):
        """Assemble the schema-level restore options.

        Ensures the target database exists (creating it when needed) and
        that the target schema is present before emitting the options.
        """
        database_name, schema_name = self._parse_restore_param_obj.get_restore_schema()
        if not (database_name and schema_name):
            log.error(f"Fail to get database({database_name}) or schema({schema_name}). "
                      f"main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        self._db_name = database_name
        log.debug(f"Get target database({database_name}) or schema({schema_name}).")
        port = self._select_available_port(self._master_port + 1, DwsRoachPort.ROACH_PORT_END)
        if port <= 0:
            log.error(f"Fail to select agent port. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return ""
        # Create the database when it does not exist yet.
        if not self._create_not_exist_database(database_name):
            self._error_code = DwsErrorCode.NO_RESTORE_SCHEMA_DATABASE
            self._log_detail_param = [f"{database_name}"]
            return ""
        if not self._check_schema_res_in_database(database_name, schema_name):
            self._error_code = DwsErrorCode.NO_RESTORE_TARGET_SCHEMA_IN_DATABASE
            self._log_detail_param = [f"{schema_name}"]
            return ""
        return f" --create --dbname {database_name} --schemaname {schema_name} --agent-port {port}"

    def _get_nbu_list(self):
        """
        Build the NBU media-list file used in non-intrusive restore.

        Resolves the roach client IP list and port recorded at backup time,
        writes the IPs to a file owned by the DB user and copies it into
        the user's sandbox.

        :return: (media_list_file, port) on success; ("", 0) on failure.
        """
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        copy_id = self._parse_restore_param_obj.get_copy_id()
        if not cache_path or not copy_id:
            log.error(f"Fail to get cache path({cache_path}) or copy id({copy_id}). "
                      f"main task:{self._main_task_id}. subtask: {self._sub_task_id}")
            return "", 0
        _, _, host_agent = self._parse_restore_param_obj.get_nodes_info()
        job_info = JobInfo(host_agents=host_agent, copy_id=copy_id, job_id=self._main_task_id)
        try:
            ret, ip_list, port = \
                ClusterBackup.check_roach_port(os.path.join(f'{cache_path}', 'tmp', copy_id, 'roach_client'), job_info)
        except ErrCodeException as err:
            # Keep the structured error details for progress reporting.
            self._error_code = err.get_log_detail().log_detail
            self._log_detail_param = err.get_log_detail().log_detail_param
            log.error(f"Catch ErrCodeException, error_code:{self._error_code}, " \
                      f"main task:{self._main_task_id}. subtask: {self._sub_task_id}.")
            return "", 0
        except Exception as err:
            log.error(f"Fail to get roach port for {err}. main task:{self._main_task_id}. subtask: {self._sub_task_id}")
            return "", 0
        log.debug(f"Roach ip list: {ip_list}, port: {port}")
        if not ret or not ip_list:
            log.error(f"Fail to get roach port. main task:{self._main_task_id}. subtask: {self._sub_task_id}")
            return "", 0
        # Write the IPs to a temporary media-list file.
        media_list_file = os.path.join("/home", self._db_user, f"mediaList{copy_id}")
        if not self._prepare_table_file(media_list_file, ip_list):
            return "", 0
        if not copy_file_into_sandbox(self._db_user, media_list_file, os.path.join('/home', self._db_user)):
            return "", 0
        return media_list_file, port

    def _construct_restore_cmd(self):
        """Assemble the full roach restore command.

        Combines the common options with the subtype-specific part
        (database / schema / table) and, in non-intrusive mode, appends
        the NBU media options.

        :return: the full command string, or "" on failure.
        """
        common_cmd = self._construct_cmd_common_part()
        log.debug(f"Common part : {common_cmd}")
        if not common_cmd:
            log.error(f"Fail to construct common part for task {self._main_task_id}.")
            return ""
        restore_target_sub_type = self._parse_restore_param_obj.get_restore_target_sub_type()
        if not restore_target_sub_type:
            log.error(f"Fail to get sub type for task {self._main_task_id}")
            return ""
        log.debug(f"Restore target subtype is : {restore_target_sub_type}")
        # Dispatch table: subtype -> (builder, failure log message).
        builders = {
            DwsSubType.DWS_DATABASE: (self._construct_database_part,
                                      f"Fail to construct database command for task {self._main_task_id}, "
                                      f"subtask: {self._sub_task_id}"),
            DwsSubType.DWS_SCHEMA: (self._construct_schema_part,
                                    f"Fail to construct schema command for task {self._main_task_id}, "
                                    f"subtask: {self._sub_task_id}"),
            DwsSubType.DWS_TABLE: (self._construct_table_part,
                                   f"Fail to construct table command for task {self._main_task_id},"
                                   f"subtask: {self._sub_task_id}"),
        }
        sub_type_cmd = ""
        if restore_target_sub_type in builders:
            builder, fail_msg = builders[restore_target_sub_type]
            sub_type_cmd = builder()
            if not sub_type_cmd:
                log.error(fail_msg)
                return ""
        common_cmd = f"{common_cmd} {sub_type_cmd}"
        if self._parse_restore_param_obj.get_intrusive_mode() == IntrusiveMode.NON_INTRUSIVE_MODE:
            nbu_list_file, client_port = self._get_nbu_list()
            if not nbu_list_file:
                log.error(f"NBU list file is empty for task {self._main_task_id}, subtask: {self._sub_task_id}.")
                return ""
            common_cmd += f" --nbu-on-remote --nbu-media-list {nbu_list_file} --client-port {client_port}"
        return common_cmd

    def _analyze_restore_result(self, output):
        """
        Check the roach restore output for the master success marker.

        :param output: combined output text of the restore command
        :return: True when some line contains "[MASTER]", "Restore" and
                 "SUCCESSFUL"; otherwise False.
        """
        log.info(f"Start to analyze restore output: {output}")
        # str.split always returns at least one element, so the original
        # "empty list" guard was dead code and has been removed.
        for single_line in output.split("\n"):
            log.debug(f"Each result line : {single_line}")
            if "[MASTER]" in single_line and "Restore" in single_line and "SUCCESSFUL" in single_line:
                return True
        log.error(f"Execute restore cmd failed. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
        return False

    def _check_agents_enough(self):
        """
        In intrusive mode, check that the agent host count covers every node
        of the target cluster.
        """
        # Node list of the cluster being restored to.
        current_nodes_list = DwsCluster.get_all_cluster_node_ip(self._db_user, self._env_file)

        # Node list carried in the restore parameters.
        ret, intrusive_nodes, _ = self._parse_restore_param_obj.get_nodes_info()
        if ret and len(current_nodes_list) <= len(intrusive_nodes):
            log.info(f"Agents is enough. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
            return True

        log.error(f"Agents is not enough. ret:{ret}, len(current_nodes_list):{len(current_nodes_list)}, "
                  f"len(intrusive_nodes):{len(intrusive_nodes)}."
                  f"main task:{self._main_task_id}, subtask: {self._sub_task_id}")
        return False

    def _start_cluster(self):
        """
        Restart the cluster after a cluster-level restore.

        Non-cluster restores return True immediately. On start failure,
        sets self._error_code to START_CLUSTER_FAILED and returns False.
        """
        # Only executed for cluster-level restore.
        copy_sub_type = self._parse_restore_param_obj.get_restore_target_sub_type()
        if not copy_sub_type:
            log.error(f"Fail to get sub type for task {self._main_task_id}")
            return False
        if copy_sub_type != DwsSubType.DWS_CLUSTER:
            log.info(f"It's not a cluster restore({copy_sub_type}), do not restart cluster.")
            return True
        log.info("Waiting to start the cluster.")
        # Field experience: starting right after restore occasionally fails
        # while a retry succeeds, so wait 30 seconds before starting.
        time.sleep(30)
        log.info("Start to start the cluster.")
        python_version = self._get_python_version()
        # Restoring to a new cluster needs extra GaussRoach.py options.
        ret, restore_to_new = self._parse_restore_param_obj.is_restore_new_cluster()
        start_cmd = f"{python_version} $GPHOME/script/GaussRoach.py -t start"
        if ret and restore_to_new:
            start_cmd = f"{python_version} $GPHOME/script/GaussRoach.py -t start --restore-new-cluster " \
                        f"-X /opt/huawei/mppdb-install-config.xml"

        started = False
        try:
            started = self._exec_start_cluster_cmd(start_cmd)
        except Exception as e:
            log.error(f"Fail to execute {start_cmd} for output: {e}."
                      f"main task:{self._main_task_id},subtask: {self._sub_task_id}")

        if not started:
            self._error_code = DwsErrorCode.START_CLUSTER_FAILED
            return False

        log.info(f"Start cluster successfully.")
        return True

    def _execute_restore(self, cmd):
        """
        Run the restore command while a background thread reports progress
        periodically, then analyze the command output for success.
        """
        progress_thread = threading.Thread(name="progress", target=self._update_progress_periodically)
        progress_thread.start()
        log.info(f"Start progress thread.main task: {self._main_task_id}, subtask:{self._sub_task_id}")
        return_code, ret_output = dws_exec_cmd_by_type(self._db_user, self._env_file, cmd)
        if not return_code:
            log.error(f"Restore failed {cmd}. {ret_output}, task:{self._main_task_id},subtask: {self._sub_task_id}")
            self._analyze_start_cluster_result(ret_output, self._parse_restore_param_obj.get_restore_target_sub_type())
            return False
        # The command exited cleanly; still verify the success marker in its output.
        if self._analyze_restore_result(ret_output):
            log.info(f"Execute restore cmd successfully.main task: {self._main_task_id}, subtask:{self._sub_task_id}")
            return True
        log.error(f"Execute restore subtask {self._sub_task_id} failed.")
        self._error_code = DwsErrorCode.ROACH_RESTORE_CMD_EXEC_FAILED
        return False

    def _analyze_start_cluster_result(self, err_info: str, dws_sub_type):
        """
        Map the restore command's echoed error text to an error code.

        :param err_info: error text echoed by the restore command
        :param dws_sub_type: restore target sub type
        """
        if NO_TABLE_IN_DATABASE_ERR_MSG in err_info:
            # "no table in database" maps to a sub-type specific code; other
            # sub types keep the previously set code but still get the
            # database name attached as a log detail parameter.
            no_table_codes = {
                DwsSubType.DWS_SCHEMA: DwsErrorCode.NO_RESTORE_TARGET_TABLE_IN_SCHEMA_DATABASE,
                DwsSubType.DWS_DATABASE: DwsErrorCode.NO_RESTORE_TARGET_TABLE_DATABASE,
            }
            if dws_sub_type in no_table_codes:
                self._error_code = no_table_codes[dws_sub_type]
            self._log_detail_param = [f"{self._db_name}"]
        else:
            self._error_code = DwsErrorCode.ROACH_RESTORE_CMD_EXEC_FAILED

        log.error(f"Restore task err_code: {self._error_code}, main: {self._main_task_id}, sub: {self._sub_task_id}")

    def _get_python_version(self):
        """Return the interpreter name for roach scripts: "python" on DWS 8.0.x, "python3" otherwise."""
        cluster_version = DwsCluster(self._db_user, self._env_file).get_cluster_version()
        return "python" if "8.0." in cluster_version else "python3"

    def _write_data_into_file(self, target_file, table_list):
        """
        Overwrite target_file with one table name per line (no trailing newline).

        :param target_file: destination file path
        :param table_list: iterable of table names
        :return: always True
        """
        # Bug fix: the original called table_list_str.strip("\n") and discarded
        # the return value — str.strip returns a new string (strings are
        # immutable), so the intended trailing-newline removal never happened.
        table_list_str = "".join(f"{table}\n" for table in table_list).strip("\n")
        target_file = os.path.realpath(target_file)
        # NOTE(review): realpath already resolves symlinks, so this islink
        # check rarely triggers — confirm the intended symlink handling.
        if os.path.islink(target_file):
            if not su_exec_rm_cmd(target_file):
                log.warn(f"Fail to remove {target_file}.")
        exec_overwrite_file(target_file, table_list_str, json_flag=False)
        log.info(f"Write data in {target_file} success. main job id: {self._main_task_id}")
        return True

    def _create_not_exist_database(self, db_name):
        """
        Before restoring a database (or a schema/table within one), check the
        target database exists; set an error code when it does not.
        """
        log.info(f"Check database {db_name} exist.")
        dws_restore_obj = DwsDatabase(self._db_user, self._env_file)
        for database in dws_restore_obj.get_all_databases().split(","):
            log.debug(f"Database: {database}")
            if database == db_name:
                return True
        self._error_code = DwsErrorCode.NO_RESTORE_TARGET_DATABASE
        return False

    def _is_schema_exist(self, db_name, table_list):
        """
        When restoring tables on DWS 8.0.x, verify every table's schema exists
        in db_name; 8.1.x and later skip the check.

        :param db_name: target database name
        :param table_list: table names, expected in "schema.table" form
        :return: True when all schemas exist or the check is skipped
        """
        dws_cluster = DwsCluster(self._db_user, self._env_file)
        if "8.0." not in dws_cluster.get_cluster_version():
            log.info("DWS 8.1.x and upper version support restore to table when schema exists.")
            return True
        all_schemas = QueryRes(self._db_user, self._env_file).get_all_schemas(db_name)
        not_exist_schema = set()
        for table in table_list:
            # Simplified from `"" if not table.split('.') else table.split('.')[0]`:
            # str.split never returns an empty list, so the conditional was dead
            # code and split was called twice. [0] is "" for an empty table name.
            schema_name = table.split('.')[0]
            if schema_name and schema_name not in all_schemas:
                log.warn(f"Schema {schema_name} not exist.")
                not_exist_schema.add(schema_name)
        if not_exist_schema:
            log.error(f"Schema: {not_exist_schema} not exist.")
            self._error_code = DwsErrorCode.RESTORE_SCHEMA_NOT_EXIST
            self._log_detail_param = [f"{list(not_exist_schema)}", db_name]
            return False
        return True

    def _clean_restore_data(self):
        """
        Clean up temporary data produced by the restore: the table-list and
        media-list files, their sandbox copies, and the metadata destination.
        """
        copy_id = self._parse_restore_param_obj.get_copy_id()
        target_table_list_file = os.path.realpath(os.path.join("/home", self._db_user, f"target_table_list{copy_id}"))
        old_table_list_file = os.path.realpath(os.path.join("/home", self._db_user, f"old_table_list{copy_id}"))
        # mediaList is the --nbu-media-list file of a non-intrusive table restore.
        media_list = os.path.realpath(os.path.join("/home", self._db_user, f"mediaList{copy_id}"))
        self._clean_restore_data_remove(target_table_list_file, self._main_task_id)
        self._clean_restore_data_remove(old_table_list_file, self._main_task_id)
        self._clean_restore_data_remove(media_list)
        self._remove_sandbox_temp_files((media_list, target_table_list_file, old_table_list_file))
        self._remove_metadata_destination()

    def _remove_sandbox_temp_files(self, temp_files):
        """In sandbox deployment the temp files were also copied into the sandbox; remove them as the db user."""
        dws_config = get_dws_config()
        deploy_type = int(dws_config.get("deployType", DwsDeployType.DEPLOY_IN_SERVER.value))
        if deploy_type != DwsDeployType.DEPLOY_IN_SANDBOX.value:
            return
        for temp_file in temp_files:
            ret, output = dws_exec_cmd_by_type(self._db_user, self._env_file,
                                               cmd_format("rm {}", temp_file))
            if not ret:
                # Consistency fix: the original logged the literal text
                # "media_list_file" for the first file instead of its path.
                log.warning(f"Fail to remove {temp_file} for {output}")

    def _remove_metadata_destination(self):
        """Remove this task's metadata-destination directory on all nodes via gs_ssh."""
        ret, restore_to_new = self._parse_restore_param_obj.is_restore_new_cluster()
        if not ret:
            log.error("Fail to get is restore to new cluster flag")
            return
        if restore_to_new:
            rm_dir = f"/home/{self._db_user}/backup/{self._main_task_id}"
        else:
            metadata_des = self._parse_restore_param_obj.get_metadata_destination()
            if not metadata_des:
                log.error(f"No metadataPath. main task:{self._main_task_id}, subtask: {self._sub_task_id}")
                return
            rm_dir = os.path.join(metadata_des, self._main_task_id)
        # Reject symlinks and injection characters before building a shell command.
        if os.path.islink(rm_dir) or check_command_injection_exclude_quote(rm_dir):
            log.error(f"Dir {rm_dir} is invalid")
            return
        rm_cmd = f"gs_ssh -c \\\"rm -rf {rm_dir}\\\""
        log.info(f"Remove metadata des cmd: {rm_cmd}")
        return_code, out_out = dws_exec_cmd_by_type(self._db_user, self._env_file, rm_cmd)
        if not return_code:
            log.warn(f"Fail to remove metadata des:{out_out} in {rm_dir}.main task {self._main_task_id}")

    def _clean_restore_data_remove(self, input_path, task_id=None):
        """
        _clean_restore_data行数太多，故增加此函数。用于执行os.remove()
        """
        rm_path = os.path.realpath(input_path)
        if os.path.isfile(rm_path):
            if not su_exec_rm_cmd(rm_path):
                if task_id is None:
                    log.warn(f"Fail to remove {rm_path}.")
                else:
                    log.warn(f"Fail to remove table list. main task {self._main_task_id}")
