#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import json
import os
import threading
import time
import uuid
from collections import namedtuple
from enum import Enum
from queue import Queue
import pwd

from dws.commons.common import log, check_path_valid, check_sh_cmd_param, check_sql_cmd_param, \
    get_gds_process_param, is_proxy_node, is_gds_same_user, is_gds_bind_unique_dir, dws_exec_array_cmd_by_type, \
    umount_all_gds_data_dir, select_available_port, process_table_name, \
    check_sh_cmd_param_for_schema_and_table, check_sql_cmd_param_for_schema_and_table
from common.file_common import change_path_permission, exec_lchown
from common.util.exec_utils import exec_mkdir_cmd, exec_mount_cmd, exec_umount_cmd, exec_overwrite_file
from common.util.common_utils import get_group_name_by_os_user
import psutil
from common.common_models import SubJobDetails, LogDetail
from common.const import SubJobStatusEnum, DBLogLevel
from dws.backup.cluster_backup import ClusterBackup
from dws.commons.const import BackupResType, IntrusiveMode, CopyGenerateType, DwsBackupLabel, \
    PERMISSION_755, PERMISSION_700, PERMISSION_600, DwsRoachPort, DwsRetEnum
from dws.commons.function_tool import log_start, out_result, progress_notify, get_register_ip, thread_lock_func
from dws.commons.job_info import JobInfo
from dws.commons.progress_notify import get_progress_path, read_file, write_file
from dws.commons.error_code import DwsErrorCode
from dws.commons.dws_exception import ErrCodeException
from dws.database.db_models import DwsIndex, GdsBackupMeta
from dws.database.db_sqlite import GdsBackupMetaFile, DWSIndexFile
from dws.resource.dws_common import DwsCommon


class TableBackupErrCode(int, Enum):
    """Result codes for parsing/validating the table-backup parameters."""
    # No error
    NO_ERROR = 0
    # None of the tables to back up exist
    TABLE_ALL_NOT_EXIST = 1
    # The tables to back up do not all belong to the same database
    TABLE_NOT_BELONG_SAME_DB = 2
    # Any other error
    UNKNOWN_ERROR = 3


class TableBackup(ClusterBackup):

    def __init__(self, pid):
        """
        Initialize per-job state for a table-level backup.

        :param pid: process/job identifier, forwarded to ClusterBackup.
        """
        super().__init__(pid)
        # Work items {"tableName": ..., "gdsParam": ...} consumed by worker threads
        self._table_queue = Queue()
        # GDS backup metadata DB handle (not used in this chunk; presumably a GdsBackupMetaFile — confirm)
        self._gds_backup_meta_db = None
        # GDS process parameters chosen for this backup
        self._gds_params = []
        # Worker threads running _work_exec
        self._work_pool = []
        # Set by a worker on failure so the run is reported as failed
        self._err_exit = False
        # Exception raised by a worker, re-raised in the main thread
        self._exception = None
        # Fully qualified "schema.table" names to back up
        self._backup_tables = []
        # Number of finished work items (reset in _create_work_pool; incremented outside this view)
        self._finished_count = 0
        # Database owning all backup tables (they must share one DB)
        self._db_name = ""
        # Logical IPs already handed out to GDS data dirs (see get_suitable_mount_path)
        self._used_logical_ip = []
        # Path of the temporary table-list file consumed by GaussRoach.py
        self._list_path = ""

    @staticmethod
    @log_start()
    def is_table_exist(job_info: JobInfo, db_name: str, table_name: str):
        """
        Check whether a table exists by running a describe (\\d) statement
        against the given database.
        """
        common_obj = DwsCommon(job_info.usr, job_info.env_path)
        describe_sql = f"\d {process_table_name(table_name)};"
        exists, _ = common_obj.exec_gaussdb_sql_cmd(db_name, describe_sql)
        return exists

    @staticmethod
    @log_start()
    def mount_gds_data_dir_to_fs(gds_data_dir: str, nfs_data_dir: str):
        """
        Bind a local GDS data directory to an NFS share, then restore the
        directory's owner with a restrictive (700) mode on the mount point.
        """
        log.info(f"NFS data dir: {nfs_data_dir}, gds dir: {gds_data_dir}.")
        # Validate externally supplied paths before they reach shell commands
        if not (check_path_valid(nfs_data_dir) and check_sh_cmd_param(nfs_data_dir)):
            log.error("Param nfs data dir invalid.")
            return False
        if not check_sh_cmd_param(gds_data_dir):
            log.error("Param gds data dir invalid.")
            return False
        # Drop any stale mount first, remembering who owns the directory itself
        TableBackup.umount_gds_data_dir(gds_data_dir)
        owner_stat = os.stat(gds_data_dir)
        ret, _, err = exec_mount_cmd(nfs_data_dir, gds_data_dir)
        if ret != DwsRetEnum.SUCCESS:
            log.error(f"Mount nfs dir err: {err}.")
            return False

        # Re-apply the original owner on top of the fresh mount
        owner_name = pwd.getpwuid(owner_stat.st_uid).pw_name
        change_path_permission(gds_data_dir, user_name=owner_name, mode=PERMISSION_700)

        log.info(f"Successfully mount gds data dir to fs.")
        return True

    @staticmethod
    @log_start()
    def umount_gds_data_dir(gds_data_dir: str):
        """
        Lazily (-l) unmount a GDS data directory; failures are only warned.
        """
        log.info("Start umount gds data dir.")
        ret_code, _, err_msg = exec_umount_cmd(gds_data_dir, "-l")
        if ret_code == DwsRetEnum.SUCCESS:
            log.info("Successfully umount gds data dir.")
            return True
        log.warn(f"Un mount gds data dir [{gds_data_dir}] err: {err_msg}.")
        return False

    @staticmethod
    def get_all_tables_from_schema(db_name: str, schema_name: str, job_info: JobInfo):
        """
        Query pg_tables and return every table of *schema_name* as a
        "schema.table" string; returns an empty list on any error.

        :param db_name: database to query (validated by the caller)
        :param schema_name: schema whose tables are listed
        :param job_info: job parameters (user, env path, pid)
        :return: list of "schema.table" names, possibly empty
        """
        tables = []
        # A schema name may contain special characters, but double quotes are rejected
        if schema_name.find('"') != -1:
            log.error('The punctuation " is not allowed in schema names. '
                      f'The provided schema name is: {schema_name} Please verify the info provided')
            return tables
        # Security: the schema name is interpolated into the SQL statement below,
        # so enforce the same shell/SQL validation applied to table names
        if not check_sh_cmd_param_for_schema_and_table(schema_name) or \
                not check_sql_cmd_param_for_schema_and_table(schema_name):
            log.error(f"Invalid schema name: [{schema_name}], {job_info.log_format()}.")
            return tables
        common_obj = DwsCommon(job_info.usr, job_info.env_path, job_info.pid)
        query_cmd = f"select * from pg_tables where schemaname='{schema_name}';"
        log.debug(f"Execute sql command {query_cmd}.")
        ret, table_lines = common_obj.exec_gaussdb_sql_cmd(db_name, query_cmd)
        if not ret:
            log.error(f"Query table from schema({schema_name}), db({db_name}) failed.")
            return tables
        # Locate the header line; the data rows start two lines below it
        start_index = -1
        # Last output line is excluded (assumed to be the row-count footer — TODO confirm)
        end_index = len(table_lines) - 1
        for index, cur_line in enumerate(table_lines):
            if "tablename" in cur_line:
                start_index = index + 2
                break
        if start_index == -1:
            log.error(f"Cannot find start flag in query table result, PID: {job_info.pid}.")
            return tables

        for index in range(start_index, end_index):
            temp_list = table_lines[index].split("|")
            if len(temp_list) < 3:
                continue
            table_name = temp_list[1].strip()
            # Validate every table name coming back from the database output
            if not check_sh_cmd_param_for_schema_and_table(table_name) or \
                not check_sql_cmd_param_for_schema_and_table(table_name):
                log.error(f"Invalid table name: [{table_name}], {job_info.log_format()}.")
                continue
            if table_name:
                tables.append(f"{schema_name}.{table_name}")
        log.debug(f"All tables : {tables}")
        return tables

    @staticmethod
    @log_start()
    def get_table_info(job_info: JobInfo):
        """
        Parse the database name and all table names out of the protect-object
        extend info ("table" key: comma-separated "db/schema/table" or
        "db/schema" entries).

        :param job_info: job parameters carrying pro_obj_extend_info
        :return: namedtuple(ret, db_name, backup_tables, not_exist_list) —
                 ret is a TableBackupErrCode; db_name is the common database
                 (empty on error); backup_tables are the existing tables;
                 not_exist_list are the missing ones.
        """
        exist_list = []
        not_exist_list = []
        tables_str = job_info.pro_obj_extend_info.get("table")
        RetSt = namedtuple("RetSt", ["ret", "db_name", "backup_tables", "not_exist_list"])
        if not tables_str:
            log.error(f"Get table string from extend info failed, {job_info.log_format()}.")
            return RetSt._make([TableBackupErrCode.UNKNOWN_ERROR, "", exist_list, not_exist_list])

        tables_list = filter(lambda x: x.strip(), tables_str.split(","))
        name_set = set()
        for table_str in tables_list:
            tmp_list = table_str.split('/')
            # Each entry must look like "db/schema/table" or "db/schema"
            if len(tmp_list) != 3 and len(tmp_list) != 2:
                log.error(f"Split err, count: {len(tmp_list)}, {job_info.log_format()}.")
                return RetSt._make([TableBackupErrCode.UNKNOWN_ERROR, "", exist_list, not_exist_list])

            db_name = tmp_list[0].strip()
            # Validate the externally supplied database name
            if not check_sql_cmd_param(db_name) or not check_sh_cmd_param(db_name):
                log.error(f"Invalid database name: [{db_name}], {job_info.log_format()}.")
                return RetSt._make([TableBackupErrCode.UNKNOWN_ERROR, "", exist_list, not_exist_list])
            # When the user selected every table of a schema, only the schema level is delivered
            if len(tmp_list) == 2:
                all_tables = TableBackup.get_all_tables_from_schema(db_name, tmp_list[1].strip(), job_info)
                exist_list.extend(all_tables)
                name_set.add(db_name)
                continue
            table_name_tmp = f"{tmp_list[1].strip()}.{tmp_list[2].strip()}"
            # Validate the externally supplied schema.table name
            if not check_sh_cmd_param_for_schema_and_table(table_name_tmp) or \
                not check_sql_cmd_param_for_schema_and_table(table_name_tmp):
                log.error(f"Invalid table name: [{table_name_tmp}], {job_info.log_format()}.")
                return RetSt._make([TableBackupErrCode.UNKNOWN_ERROR, "", exist_list, not_exist_list])

            sch_name = f"{tmp_list[1].strip()}"
            tab_name = f"{tmp_list[2].strip()}"
            table_name = f"{sch_name}.{tab_name}"

            if TableBackup.is_table_exist(job_info, db_name, table_name):
                name_set.add(db_name)
                exist_list.append(table_name)
            else:
                log.info(f"Table name [{table_name}] not exist in db [{db_name}], {job_info.log_format()}.")
                not_exist_list.append(table_name)

        if len(exist_list) == 0:
            log.error(f"Any of tables does not exist. {job_info.log_format()}")
            return RetSt._make([TableBackupErrCode.TABLE_ALL_NOT_EXIST, "", exist_list, not_exist_list])

        # Every table must belong to exactly one database
        if len(name_set) != 1:
            log.error(f"Result err, name_set cnt: {len(name_set)}, " \
                      f"exist_list cnt:{len(exist_list)}, {job_info.log_format()}.")
            return RetSt._make([TableBackupErrCode.TABLE_NOT_BELONG_SAME_DB, "", exist_list, not_exist_list])

        return RetSt._make([TableBackupErrCode.NO_ERROR, name_set.pop(), exist_list, not_exist_list])

    @staticmethod
    @log_start()
    def clean_resource(job_info: JobInfo):
        """
        Clean leftovers of a table backup: cluster-level cleanup first
        (skipped on proxy nodes), then host-local GDS unmounts.
        """
        if is_proxy_node(job_info.nodes):
            TableBackup.clean_res_in_host(job_info)
            return
        TableBackup.clean_res_in_cluster(job_info)
        TableBackup.clean_res_in_host(job_info)

    @staticmethod
    @log_start()
    def clean_res_in_cluster(job_info: JobInfo):
        """
        Cluster-side cleanup: kill leftover INSERT processes and drop the
        temporary foreign tables created for the GDS backup.
        """
        # Kill leftover INSERT processes first
        TableBackup.clean_threads(job_info)

        # Resolve which tables this job was backing up
        table_info = TableBackup.get_table_info(job_info)
        if table_info.ret != TableBackupErrCode.NO_ERROR:
            log.error(f"Get table info failed, {job_info.log_format()}.")
            return False

        # Drop the helper foreign tables
        TableBackup.clean_foreign_tables(job_info, table_info.db_name, table_info.backup_tables)
        return True

    @staticmethod
    @log_start()
    def clean_res_in_host(job_info: JobInfo):
        """
        Host-side cleanup: unmount every GDS data directory recorded for
        this host by the white-list sub job.
        """
        host_ip = get_register_ip(job_info.cluster_agents, job_info.host_agents)
        if not host_ip:
            log.error(f"Get host ip failed, {job_info.log_format()}.")
            return False
        # The white-list sub job wrote this host's GDS parameters into the cache repo
        gds_param_path = os.path.join(job_info.cache_path, "tmp", job_info.copy_id, "gds", f"gds_{host_ip}.txt")
        if not os.path.isfile(gds_param_path):
            log.error(f"GDS param path [{gds_param_path}] not exists, {job_info.log_format()}.")
            return False

        for gds_param in read_file(gds_param_path):
            TableBackup.umount_gds_data_dir(gds_param.get('localDataDir'))
        return True

    @staticmethod
    @log_start()
    def clean_threads(job_info: JobInfo):
        """
        Kill leftover "INSERT INTO" processes from a previous backup run.
        """
        insert_pids = DwsCommon.filter_pids("INSERT INTO")
        if not insert_pids:
            log.warn(f"No insert process found, {job_info.log_format()}.")
            return
        for insert_pid in insert_pids:
            try:
                psutil.Process(insert_pid).kill()
            except Exception as err:
                # Best effort: the process may already have exited
                log.warn(f"Get process err: {err}.")
                continue
            log.info(f"kill process: {insert_pid}, {job_info.log_format()}.")

    @staticmethod
    @log_start()
    def clean_foreign_tables(job_info: JobInfo, db_name: str, table_list: list):
        """
        Drop the temporary foreign tables ("foreign_<schema>_<table>")
        created for the GDS export; each drop is logged individually.

        :param job_info: job parameters
        :param db_name: database the foreign tables live in
        :param table_list: "schema.table" names that were backed up
        """
        common_obj = DwsCommon(job_info.usr, job_info.env_path, job_info.pid)
        for table_name in table_list:
            # Foreign table naming: dots become underscores, quotes are stripped
            foreign_table_name = f"foreign_{table_name.replace('.', '_')}".replace('"', '')
            sql_cmd = f"DROP FOREIGN TABLE IF EXISTS \"{foreign_table_name}\";"
            ret, _ = common_obj.exec_gaussdb_sql_cmd(db_name, sql_cmd)
            # Typo fixed in both messages: "talbe" -> "table"
            if ret:
                log.info(f"Drop foreign table [{foreign_table_name}] success, {job_info.log_format()}.")
            else:
                log.error(f"Drop foreign table [{foreign_table_name}] failed, {job_info.log_format()}.")

    @log_start()
    def report_not_exist_table(self, job_info: JobInfo, table_list: list):
        """
        Report a warning listing the tables that do not exist in the database.
        """
        warn_detail = LogDetail(
            logInfo=DwsBackupLabel.BACKUP_TABLE_NOT_EXISTS_LABEL,
            logInfoParam=[",".join(table_list)],
            logLevel=DBLogLevel.WARN)
        sub_job_detail = SubJobDetails(
            taskId=job_info.job_id,
            subTaskId=job_info.sub_job_id,
            taskStatus=SubJobStatusEnum.RUNNING,
            progress=10,
            logDetail=[warn_detail])
        self.report_job_details(job_info, sub_job_detail.dict(by_alias=True))

    @log_start()
    def report_gds_count_warn(self, job_info: JobInfo, host_ip: str, gds_count: int, optimal_count: int):
        """
        Report a warning that the GDS process count on *host_ip* is below
        the recommended value.
        """
        warn_detail = LogDetail(
            logInfo=DwsBackupLabel.GDS_COUNT_LESS_THAN_OPTIMAL_COUNT,
            logInfoParam=[host_ip, str(gds_count), str(optimal_count)],
            logLevel=DBLogLevel.WARN)
        sub_job_detail = SubJobDetails(
            taskId=job_info.job_id,
            subTaskId=job_info.sub_job_id,
            taskStatus=SubJobStatusEnum.RUNNING,
            progress=10,
            logDetail=[warn_detail])
        self.report_job_details(job_info, sub_job_detail.dict(by_alias=True))

    @log_start()
    def white_sub_job(self, job_info: JobInfo, intrusive_mode):
        """
        White-list sub job, executed on every node.

        :param job_info: job parameters
        :param intrusive_mode: whether the backup is intrusive
        :return: True
        """
        if job_info.backup_tool_type != CopyGenerateType.GENERATE_BY_GDS:
            return super().white_sub_job(job_info, intrusive_mode)
        repo_list = type(self).get_repo_list(job_info)
        return self.gds_white_sub_job(job_info, repo_list)

    def get_suitable_mount_path(self, mount_path_arr: list) -> str:
        """
        Prefer a mount point whose logical IP has not been handed out yet so
        GDS traffic spreads over different logical ports; fall back to the
        first mount point once every logical IP is in use.
        """
        if not isinstance(mount_path_arr, list) or not mount_path_arr:
            log.error("Mount path array is empty.")
            return ""
        for candidate in mount_path_arr:
            logical_ip = os.path.basename(os.path.abspath(candidate))
            if logical_ip not in self._used_logical_ip:
                self._used_logical_ip.append(logical_ip)
                return candidate
        return mount_path_arr[0]

    @log_start()
    def distribute_file_system(self, job_info: JobInfo, host_ip: str, fs_list: list, gds_param_list: list):
        """
        Bind one backup filesystem to each GDS process by mounting the
        filesystem onto the process's data directory.

        :param job_info: job parameters
        :param host_ip: register IP of this host
        :param fs_list: filesystems (dicts carrying mountPath/id/x8000_esn)
        :param gds_param_list: GDS process params (dataDir/user/ipPort/concurrent)
        :return: (success, binding_info_list); the list records each
                 host/fs/GDS pairing and is empty on failure.
        """
        # zip below pairs filesystems and GDS processes one-to-one
        gds_mount_fs_info = []
        log.info(f"Filesystem count: {len(fs_list)}, GDS process count: {len(gds_param_list)}" \
            f", {job_info.log_format()}.")
        if not type(self).create_data_rep_2(job_info, host_ip, gds_param_list[0].get("user")):
            log.error(f"Create data reps failed, {job_info.log_format()}.")
            return False, []
        self._used_logical_ip.clear()
        occur_err = False
        # Bind one filesystem to every GDS process
        for fs_dict, gs_param in zip(fs_list, gds_param_list):
            tmp_local_path = gs_param.get("dataDir")
            tmp_fs_path = os.path.join(self.get_suitable_mount_path(fs_dict.get("mountPath")), \
                "data", job_info.copy_id, host_ip)
            if not self.mount_gds_data_dir_to_fs(tmp_local_path, tmp_fs_path):
                log.error(f"Mount gds data dir to dorado filesystem failed, {job_info.log_format()}.")
                occur_err = True
                break
            gds_mount_fs_info.append({
                "hostIP": host_ip,
                "user": gs_param.get("user"),
                "esn": fs_dict.get("x8000_esn"),
                "fsId": fs_dict.get("id"),
                "ipPort": gs_param.get("ipPort"),
                "localDataDir": tmp_local_path,
                "concurrentNum": gs_param.get("concurrent")
            })

        # On error, unmount everything that was already bound
        if occur_err:
            for item in gds_mount_fs_info:
                self.umount_gds_data_dir(item.get('localDataDir'))
            return False, []
        return True, gds_mount_fs_info

    @log_start()
    @out_result()
    @progress_notify(fail_lab=DwsBackupLabel.WHITE_SUB_JOB_FAILED_LABEL)
    def gds_white_sub_job(self, job_info: JobInfo, repo_list: list):
        """
        GDS white-list sub job. It runs on multiple nodes; a single node
        failing still returns True so the other nodes are unaffected — the
        task can proceed as long as any node succeeds.

        :param job_info: job parameters
        :param repo_list: backup repositories with their filesystems
        :return: True (always; failures are only logged)
        """
        # Collect the parameters of every local GDS process
        ret, gds_param_list = get_gds_process_param()
        if not ret:
            log.warn(f"Get all GDS process param failed, {job_info.log_format()}.")
            # Still report success: another node's white-list job may have succeeded
            return True

        host_ip = get_register_ip(job_info.cluster_agents, job_info.host_agents)
        if not host_ip:
            log.warn(f"Get host ip failed, {job_info.log_format()}.")
            return True

        # Flatten the repositories into one filesystem list, tagging each with its device SN
        fs_list = []
        for x8000 in repo_list:
            file_systems = x8000.get("filesystems", [])
            for item in file_systems:
                item["x8000_esn"] = x8000.get("deviceSN")
                fs_list.append(item)
        ret, gds_mount_fs_info = self.distribute_file_system(job_info, host_ip, fs_list, gds_param_list)
        if not ret:
            log.warning(f"Distribute file system to gds process failed, {job_info.log_format()}.")
            return True
        # Persist this host's binding info into the cache repository
        gds_info_path = os.path.join(job_info.cache_path, "tmp", job_info.copy_id, \
            "gds", f"gds_{host_ip}.txt")
        if not check_path_valid(gds_info_path):
            log.warning(f"Invalid gds info path [{gds_info_path}], {job_info.log_format()}.")
            return True
        write_file(gds_info_path, json.dumps(gds_mount_fs_info))
        return True

    @log_start()
    @progress_notify(DwsBackupLabel.BACKUP_SUBJOB_SUC_LABEL, DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL)
    def sub_job_backup(self, job_info: JobInfo, intrusive_mode):
        """
        Table backup sub job: check the table state, then dispatch to the
        GDS flow or to the parent (Roach) flow.
        """
        if not self._check_table_state(job_info):
            log.error(f"Check state Failed. {job_info.log_format()}")
            return False

        if job_info.backup_tool_type != CopyGenerateType.GENERATE_BY_GDS:
            return super().sub_job_backup(job_info, intrusive_mode)
        return self.gds_sub_job_backup(job_info)

    @log_start()
    def post_sub_job(self, job_info: JobInfo, intrusive_mode):
        """
        Post sub job: GDS copies get their own cleanup; everything else
        falls through to the parent implementation.
        """
        if job_info.backup_tool_type != CopyGenerateType.GENERATE_BY_GDS:
            return super().post_sub_job(job_info, intrusive_mode)
        return self.gds_post_sub_job(job_info)

    def construct_cmd(self, job_info: JobInfo, intrusive_mode, port):
        """
        Build the GaussRoach.py command line for a logical table-level backup.

        :param job_info: job parameters
        :param intrusive_mode: IntrusiveMode; non-intrusive mode appends NBU remote options
        :param port: client port used in non-intrusive mode
        :return: the full backup command string
        :raise Exception: invalid/uncreatable table-list file, or no free
                          master/agent port available
        """
        log.info(f"Table, extend_info: {job_info.pro_obj_extend_info}, {job_info.log_format()}")

        # The table list is written into the database user's home directory
        self._list_path = os.path.join("/home", job_info.usr, "table_list_temp.txt")
        if not check_path_valid(self._list_path, "/home/"):
            log.error(f"Table list path [{self._list_path}] invalid, {job_info.log_format()}.")
            raise Exception("Table list path invalid")
        if not self._create_table_list_file(job_info, self._list_path):
            log.error(f"Create table list file failed, {job_info.log_format()}.")
            raise Exception("Create table list file failed")
        # Pick two distinct free ports for the Roach master and agent
        master_port = select_available_port(DwsRoachPort.ROACH_PORT_START, DwsRoachPort.ROACH_PORT_END)
        if not master_port:
            log.error(f"Master port {master_port} can not use.")
            raise Exception(f"Master port {master_port} can not use")
        agent_port = select_available_port(master_port + 1, DwsRoachPort.ROACH_PORT_END)
        if not agent_port:
            log.error(f"Agent port {agent_port} can not use.")
            raise Exception(f"Agent port {agent_port} can not use")
        backup_cmd = f"{self._python_version} $GPHOME/script/GaussRoach.py -t backup --master-port {master_port} " \
                     f"--media-type NBU --media-destination nbu_policy --filesplit-size 256 " \
                     f"--metadata-destination {job_info.metadata_destination} --agent-port {agent_port} " \
                     f"--logical --parallel-process {self._process_count} " \
                     f"--dbname {self._db_name} --table-list {self._list_path} "

        if intrusive_mode == IntrusiveMode.NON_INTRUSIVE_MODE:
            backup_cmd += f" --nbu-on-remote --nbu-media-list /home/{job_info.usr}/media_list_file.txt " \
                          f"--client-port {port}"

        log.debug(f"backup_cmd : {backup_cmd}, {job_info.log_format()}")
        return backup_cmd

    @log_start()
    def gds_save_index_info(self, job_info: JobInfo):
        """
        Persist the fine-grained cluster/db/schema/table index used by
        restore, then lock the index file down to root with mode 600.

        :raise Exception: when the sqlite index file cannot be opened
        """
        try:
            self._db = DWSIndexFile(f"sqlite:///{ClusterBackup.get_index_file_path(job_info)}")
        except Exception as err:
            log.error(f'Err: {err}, job_info: {job_info.log_format()}')
            raise err
        self.gds_save_cluster_info(job_info)

        # copymetadata.sqlite must be owned by root:root with mode 600
        change_path_permission(ClusterBackup.get_index_file_path(job_info), user_name="root", mode=PERMISSION_600)
        return True

    @log_start()
    def gds_save_cluster_info(self, job_info: JobInfo):
        """
        Insert the cluster root node into the index, then descend to the
        database level.
        """
        log.info("Gds save cluster info.")
        cluster_name = job_info.protect_env.get('name')
        cluster_uuid = str(uuid.uuid1())
        cluster_record = DwsIndex(
            UUID=cluster_uuid,
            NAME=cluster_name,
            TYPE=BackupResType.CLUSTER.value,
            PARENT_PATH='/',
            PARENT_UUID='')
        self._db.insert_record(cluster_record)
        self.gds_save_database_info(job_info, cluster_uuid, '/' + cluster_name)

    @log_start()
    def gds_save_database_info(self, job_info: JobInfo, parent_uuid: str, parent_path: str):
        """
        Insert the database node under the cluster node, then descend to the
        schema level.
        """
        db_name = job_info.pro_obj.get('parentName')
        database_uuid = str(uuid.uuid1())
        database_record = DwsIndex(
            UUID=database_uuid,
            NAME=db_name,
            TYPE=BackupResType.DATABASE.value,
            PARENT_PATH=parent_path,
            PARENT_UUID=parent_uuid)
        self._db.insert_record(database_record)
        self.gds_save_schema_info(job_info, db_name, database_uuid, f'{parent_path}/{db_name}')

    @log_start()
    def gds_save_schema_info(self, job_info: JobInfo, database_name: str, parent_uuid: str, parent_path: str):
        """
        Insert one index node per schema and one per table underneath it.

        :param job_info: job parameters (only part of the call chain here)
        :param database_name: name of the owning database
        :param parent_uuid: UUID of the database index node
        :param parent_path: index path of the database node
        """
        # Group the backup tables by schema; names are "schema.table", possibly quoted
        schemas_table = {}
        for table in self._backup_tables:
            # Split on the first dot only, so a table name containing dots no
            # longer raises ValueError (the original unconditional 2-way unpack did)
            schema_name, table_name = table.replace('"', '').split(".", 1)
            schemas_table.setdefault(schema_name, []).append(table_name)

        for schema_name, tables in schemas_table.items():
            schema_uuid = str(uuid.uuid1())
            self._db.insert_record(
                DwsIndex(UUID=schema_uuid,
                         NAME=schema_name,
                         TYPE=BackupResType.SCHEMA.value,
                         PARENT_PATH=parent_path,
                         PARENT_UUID=parent_uuid))
            for table in tables:
                self._db.insert_record(
                    DwsIndex(
                        UUID=str(uuid.uuid1()),
                        NAME=table,
                        TYPE=BackupResType.TABLE.value,
                        PARENT_PATH=f'{parent_path}/{schema_name}',
                        PARENT_UUID=schema_uuid))

    @log_start()
    def get_appropriate_gds_param(self, job_info: JobInfo, gds_params_arr: list):
        """
        Pick the GDS parameter set of the agent with the most valid GDS
        processes, while remembering the largest invalid agent for error
        reporting.

        :param job_info: job parameters
        :param gds_params_arr: list of {"hostIp": ..., "data": [...]} per agent
        :return: the chosen GDS parameter list
        :raise ErrCodeException: when no agent provides a usable parameter set
        """
        ret_gds_param = None
        suc_host_ip = None
        err_host_ip = None
        suc_max_count = 0
        err_max_count = 0
        err_type = DwsErrorCode.NO_ERR
        for item in gds_params_arr:
            host_ip = item.get("hostIp")
            single_param = item.get("data")
            cur_count = len(single_param)
            # All GDS processes on one agent must run under the same user
            if not is_gds_same_user(single_param):
                log.warn(f"GDS process in agent [{host_ip}] started in different user.")
                if cur_count > err_max_count:
                    err_host_ip = host_ip
                    err_max_count = cur_count
                    err_type = DwsErrorCode.GDS_START_USER_NOT_SAME
                continue
            # Each GDS process must be bound to its own data directory
            if not is_gds_bind_unique_dir(single_param):
                log.warn(f"GDS process in agent [{host_ip}] bind the same data dir.")
                if cur_count > err_max_count:
                    err_host_ip = host_ip
                    err_max_count = cur_count
                    err_type = DwsErrorCode.GDS_BIND_SAME_DATA_DIR
                continue

            # Prefer the agent with the most GDS processes
            if cur_count > suc_max_count:
                suc_host_ip = host_ip
                suc_max_count = cur_count
                ret_gds_param = single_param

        # Warn when there are fewer GDS processes than backup filesystems
        fs_count = len(job_info.data_reps)
        if ret_gds_param and suc_max_count < fs_count:
            log.warn(f"GDS process count [{suc_max_count}] is less than " \
                f"file system count [{fs_count}], {job_info.log_format()}.")
            self.report_gds_count_warn(job_info, suc_host_ip, suc_max_count, fs_count)

        if not ret_gds_param:
            log.error(f"Cannot find appropriate gds param in all agents, {job_info.log_format()}.")
            raise ErrCodeException(
                log_detail=LogDetail(
                    logDetail=err_type,
                    logDetailParam=[err_host_ip],
                    logTimestamp=int(time.time()),
                    logLevel=DBLogLevel.ERROR))

        return ret_gds_param

    @log_start()
    def get_gds_param_from_dir(self, job_info: JobInfo):
        """
        Collect the per-host GDS parameter files ("gds_<ip>.txt") written by
        the white-list sub jobs and pick the most suitable parameter set.

        :raise ErrCodeException: when no parameter file exists at all
        """
        gds_data_dir = os.path.join(job_info.cache_path, "tmp", job_info.copy_id, "gds")
        gds_params_arr = []
        for file_name in os.listdir(gds_data_dir):
            full_name = os.path.join(gds_data_dir, file_name)
            if not file_name.startswith("gds_") or not os.path.isfile(full_name):
                continue
            try:
                tmp_params = read_file(full_name)
            except Exception as err:
                log.error(f"Read GDS param path failed with err: {err}, {job_info.log_format()}.")
                continue
            # File name pattern: gds_<hostIp>.txt
            host_ip = os.path.splitext(file_name)[0].split("_")[1]
            gds_params_arr.append({"hostIp": host_ip, "data": tmp_params})

        if not gds_params_arr:
            log.error(f"No gds param file in path [{gds_data_dir}], {job_info.log_format()}.")
            raise ErrCodeException(
                log_detail=LogDetail(
                    logDetail=DwsErrorCode.GDS_PROCESS_NOT_EXIST,
                    logTimestamp=int(time.time()),
                    logLevel=DBLogLevel.ERROR))

        return self.get_appropriate_gds_param(job_info, gds_params_arr)

    @log_start()
    @progress_notify(DwsBackupLabel.BACKUP_SUBJOB_SUC_LABEL, DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL)
    def gds_sub_job_backup(self, job_info: JobInfo):
        """
        Run the GDS backup: export table DDL, distribute the tables over the
        GDS processes, run the concurrent export and record metadata/index.
        """
        progress_path = get_progress_path(job_info.copy_id, job_info.sub_job_id, job_info.cache_path)
        running_detail = SubJobDetails(taskId=job_info.job_id,
                                       subTaskId=job_info.sub_job_id,
                                       taskStatus=SubJobStatusEnum.RUNNING.value,
                                       progress=5)
        write_file(progress_path, json.dumps(running_detail.dict(by_alias=True)))

        # Pick the GDS process parameters recorded by the white-list sub jobs
        self._gds_params = self.get_gds_param_from_dir(job_info)
        if not self._gds_params:
            log.error("Get gds process params failed.")
            return False

        # Export the table definitions (DDL)
        if not self._create_table_sql_file(job_info):
            log.error(f"Create table sql file failed, {job_info.log_format()}.")
            return False

        # Distribute the tables over the GDS processes
        if not self._fill_table_queue(job_info):
            log.error("Fill table queue failed.")
            return False

        # Run the concurrent export workers
        self._err_exit = False
        if not self._create_work_pool(job_info):
            log.error(f"Work pool failed, {job_info.log_format()}.")
            return False

        if not self.update_metadata("gds", job_info):
            log.error(f"Update metadata failed, {job_info.log_format()}.")
            return False

        try:
            self.gds_save_index_info(job_info)
        except Exception as err:
            log.error(f'Index file err: {err}')
            return False

        return True

    @log_start()
    @out_result()
    @progress_notify()
    def gds_post_sub_job(self, job_info: JobInfo):
        """
        GDS post-processing sub job: drop leftover resources, then unmount
        every GDS data directory on this host.
        """
        TableBackup.clean_resource(job_info)
        umount_all_gds_data_dir()
        return True

    @log_start()
    def _create_work_pool(self, job_info: JobInfo):
        """
        Spawn one worker thread per GDS concurrency slot, wait for all of
        them, and re-raise the first worker exception in this thread.
        """
        thread_count = sum(item.get("concurrentNum") for item in self._gds_params)

        log.info(f"Concurrent threads count: {thread_count}, {job_info.log_format()}.")
        self._finished_count = 0
        for _ in range(thread_count):
            worker = threading.Thread(target=self._work_exec, args=(job_info,))
            worker.start()
            self._work_pool.append(worker)
        log.info(f"Main process wait ... {job_info.log_format()}.")
        for worker in self._work_pool:
            worker.join()

        log.info(f"Main process wait finished, {job_info.log_format()}.")

        # Propagate a worker failure to the caller
        if self._err_exit and self._exception:
            log.error(f'Gds err: {self._exception}')
            raise self._exception

        return not self._err_exit

    @log_start()
    def _create_table_list_file(self, job_info: JobInfo, path):
        """
        Write the backup table list to *path* and hand ownership of the file
        to the database OS user.
        """
        self._write_table_to_file(path, self._backup_tables)
        real_path = os.path.realpath(path)
        group_name = get_group_name_by_os_user(job_info.usr)
        if exec_lchown(real_path, job_info.usr, group_name):
            return True
        log.error(f"Change table file [{real_path}] owner failed, {job_info.log_format()}.")
        return False

    @log_start()
    def _create_gs_dump_dir(self, job_info: JobInfo):
        """
        Ensure the gs_dump output directory exists inside the meta repository
        and make it accessible (755) to the database OS user.
        """
        dump_dir = os.path.join(job_info.meta_path, 'meta', job_info.copy_id, 'gsdump')
        if not os.path.exists(dump_dir):
            exec_mkdir_cmd(dump_dir)

        change_path_permission(dump_dir, user_name=job_info.usr, mode=PERMISSION_755)

    @log_start()
    def _create_table_sql_file(self, job_info: JobInfo):
        """
        Export the DDL of every backup table with gs_dump (schema only,
        plain format) into the meta repository.
        """
        self._create_gs_dump_dir(job_info)
        common_obj = DwsCommon(job_info.usr, job_info.env_path, self._pid)
        db_port = common_obj.get_db_port()
        if not db_port:
            log.error(f"Get db port failed, {job_info.log_format()}.")
            return False

        gs_dump_path = os.path.join(job_info.meta_path, 'meta', job_info.copy_id, 'gsdump')
        for table_name in self._backup_tables:
            parts = table_name.replace('"', '').split('.')
            schema_part = parts[0].strip()
            table_part = parts[1].strip()
            tab_name = f"{schema_part}.{table_part}"
            # gs_dump wants the identifiers individually quoted
            processed_tab_name = f"\"{schema_part}\".\"{table_part}\""

            file_path = os.path.join(gs_dump_path, f"{tab_name}.sql")
            create_cmd = f"gs_dump -f '{file_path}' -p {db_port} {self._db_name} -t '{processed_tab_name}' -F p -s"
            ret, output = dws_exec_array_cmd_by_type(job_info.usr, job_info.env_path, create_cmd)
            if not ret:
                log.error(f"Generate sql file {file_path} err: {output}, {job_info.log_format()}.")
                return False

        log.info(f"Generate sql file suc, {job_info.log_format()}.")
        return True

    @log_start()
    def _fill_table_queue(self, job_info: JobInfo):
        """
        Distribute all tables over the GDS processes according to each
        process's -t parameter (concurrentNum), round-robin weighted by the
        remaining budget of each process.

        Example: three GDS processes with -t values 1 3 2 and 10 tables:
        table 1 -> process 1, pro_t_param becomes 0 3 2
        table 2 -> process 2, pro_t_param becomes 0 2 2
        table 3 -> process 3, pro_t_param becomes 0 2 1
        table 4 -> pro_t_param becomes 0 1 1
        table 5 -> pro_t_param becomes 0 1 0
        ... and the budgets are reset once they all reach zero.

        :param job_info: current job information
        :return: True on success, False on failure
        """

        def reset_pro_t_param():
            pro_t_param.clear()
            for item in self._gds_params:
                pro_t_param.append(item.get("concurrentNum"))

        def is_all_zero():
            return not any(pro_t_param)

        pro_t_param = []
        cur_pro_index = 0
        for table in self._backup_tables:
            if is_all_zero():
                reset_pro_t_param()
                # Robustness: if _gds_params is empty or every concurrentNum is
                # zero/missing, the index-advance loop below would never
                # terminate (or IndexError on an empty list) — fail fast instead.
                if is_all_zero():
                    log.error(f"All GDS concurrentNum params are empty, {job_info.log_format()}.")
                    return False
            while not pro_t_param[cur_pro_index]:
                cur_pro_index = (cur_pro_index + 1) % len(pro_t_param)
            self._table_queue.put({"tableName": table, "gdsParam": self._gds_params[cur_pro_index]})
            # Persist which GDS process (and hence filesystem) the table is sent to
            if not self._record_table_position(job_info, table, self._gds_params[cur_pro_index]):
                log.error(f"Record table position failed, {job_info.log_format()}.")
                return False
            pro_t_param[cur_pro_index] -= 1
            cur_pro_index = (cur_pro_index + 1) % len(pro_t_param)

        return True

    @log_start()
    def _work_exec(self, job_info: JobInfo):
        """
        Worker-thread entry point: run the GDS backup and record any failure
        on the shared error flags instead of letting it escape the thread.
        """
        try:
            self._run_gds_backup(job_info)
        except ErrCodeException as err:
            log.exception(f"Catch ErrCodeException, {job_info.log_format()}.")
            # Keep the typed exception so the main thread can re-raise it
            self._exception = err
            self._err_exit = True
        except Exception as err:
            log.exception(f"Run gds backup err: {err}, {job_info.log_format()}.")
            self._err_exit = True

    @log_start()
    def _run_gds_backup(self, job_info: JobInfo):
        """
        Export table data to the data directory through GDS.

        Each worker thread drains the shared table queue: for every table it
        drops any stale foreign table, recreates a WRITE ONLY foreign table
        pointing at the assigned GDS endpoint, then INSERTs the source table
        into it so GDS streams the rows out. Progress is reported after each
        exported table.

        :param job_info: current job information
        :return: True when the queue is drained, False on drop/create failure
        :raises ErrCodeException: when exporting (INSERT) a table fails
        """
        # threading.currentThread() is deprecated; use current_thread()
        thread_info = threading.current_thread()
        log.info(f"Thread [{thread_info.ident}] start, {job_info.log_format()}.")
        common_obj = DwsCommon(job_info.usr, job_info.env_path, self._pid)
        while not self._table_queue.empty():
            backup_obj = self._table_queue.get()
            table_name = backup_obj.get("tableName").replace('"', '')
            gds_param = backup_obj.get("gdsParam")
            gds_ip_port = gds_param.get("ipPort")
            log.debug(f"Thread [{thread_info.ident}] get table name [{table_name}], {job_info.log_format()}.")
            foreign_table_name = f"foreign_{table_name.replace('.', '_')}".replace('"', '')
            processed_table_name = process_table_name(table_name)
            # Drop a possibly leftover foreign table from a previous run
            sql_cmd = f"DROP FOREIGN TABLE IF EXISTS \"{foreign_table_name}\";"
            ret, _ = common_obj.exec_gaussdb_sql_cmd(self._db_name, sql_cmd)
            log.debug(f"Execute sql command: {sql_cmd}")
            if not ret:
                log.error(f"Drop foreign table failed, thread id: {thread_info.ident}, {job_info.log_format()}.")
                self._err_exit = True
                return False
            # Create the WRITE ONLY foreign table bound to this GDS endpoint
            sql_cmd = f"CREATE FOREIGN TABLE \"{foreign_table_name}\"(like {processed_table_name}) " \
                      f"SERVER gsmpp_server OPTIONS (LOCATION \'gsfs://{gds_ip_port}/\', " \
                      f"FORMAT \'text\', DELIMITER \',\', EOL \'0x0a\') WRITE ONLY;"
            ret, _ = common_obj.exec_gaussdb_sql_cmd(self._db_name, sql_cmd)
            log.debug(f"Execute sql command: {sql_cmd}")
            if not ret:
                log.error(f"Create foreign table failed, thread id: {thread_info.ident}, {job_info.log_format()}.")
                self._err_exit = True
                return False

            # Insert the data: the INSERT pushes rows through GDS to disk
            sql_cmd = f"INSERT INTO \"{foreign_table_name}\" select * from {processed_table_name};"
            ret, _ = common_obj.exec_gaussdb_sql_cmd(self._db_name, sql_cmd)
            log.debug(f"Execute sql command: {sql_cmd}")
            if not ret:
                log.error(f"Insert into foreign table failed, thread id: {thread_info.ident}, "\
                    f"{job_info.log_format()}.")
                self._err_exit = True
                raise ErrCodeException(
                    log_detail=LogDetail(
                        logDetail=DwsErrorCode.GDS_EXPORT_DATA_FAILED,
                        logDetailParam=[get_register_ip(job_info.cluster_agents, job_info.host_agents)],
                        logTimestamp=int(time.time()),
                        logLevel=DBLogLevel.ERROR))

            # Update progress after each exported table
            self._finished_count += 1
            self._update_process(job_info)

        log.info(f"Thread [{thread_info.ident}] finished, {job_info.log_format()}.")
        return True

    @log_start()
    @thread_lock_func(threading.Lock())
    def _update_process(self, job_info: JobInfo):
        """
        Write the current sub-job progress to the progress file; serialized by
        the shared lock because multiple worker threads report concurrently.

        :param job_info: current job information
        :return: True on success, False on failure
        """
        host_ip = get_register_ip(job_info.cluster_agents, job_info.host_agents)
        if not host_ip:
            log.error(f"Get host ip failed, {job_info.log_format()}.")
            return False
        progress_path = get_progress_path(job_info.copy_id, job_info.sub_job_id, job_info.cache_path)
        if not check_path_valid(progress_path):
            log.error(f"Invalid progress path [{progress_path}], {job_info.log_format()}.")
            return False
        try:
            total = len(self._backup_tables)
            detail = LogDetail(
                logInfo=DwsBackupLabel.GDS_BACKUP_SUB_JOB_PROGESS,
                logInfoParam=[host_ip, str(total), str(self._finished_count)],
                logLevel=DBLogLevel.INFO)
            job_detail = SubJobDetails(
                taskId=job_info.job_id,
                subTaskId=job_info.sub_job_id,
                taskStatus=SubJobStatusEnum.RUNNING,
                progress=int(self._finished_count / total * 100),
                logDetail=[detail])
            write_file(progress_path, json.dumps(job_detail.dict(by_alias=True)))
        except Exception as err:
            log.error(f"Write progress file raise err: {err}, {job_info.log_format()}.")
            return False
        return True

    @log_start()
    def _record_table_position(self, job_info: JobInfo, table_name: str, gds_param: object):
        """
        Record in the GDS backup meta db which agent/filesystem holds a
        table's exported data.

        :param job_info: current job information
        :param table_name: "schema.table" name of the backed-up table
        :param gds_param: GDS process parameters (hostIP, esn, fsId, ...)
        :return: True on success, False on failure
        """
        # Lazily create the sqlite meta db on first use
        if not self._gds_backup_meta_db and not self._init_gds_backup_meta_db(job_info):
            log.error(f"Init GDS backup meta db failed, {job_info.log_format()}.")
            return False
        tmp_list = table_name.split(".")
        if len(tmp_list) < 2:
            log.error(f"Split table name [{table_name}] failed, {job_info.log_format()}.")
            return False
        # Reuse tmp_list instead of splitting the name twice more
        self._gds_backup_meta_db.insert_record(
            GdsBackupMeta(
                AGENT_IP=gds_param.get("hostIP"),
                DB_NAME=self._db_name,
                SCHEMA_NAME=tmp_list[0],
                TABLE_NAME=tmp_list[1],
                FS_DEVICE_ID=gds_param.get("esn"),
                FS_ID=gds_param.get("fsId")))

        return True

    @log_start()
    def _init_gds_backup_meta_db(self, job_info: JobInfo):
        """
        Initialize the sqlite db file holding GDS backup metadata.

        The file lives under <meta_path>/meta/<copy_id> and is restricted to
        root with mode 600 after creation.

        :param job_info: current job information
        :return: True on success, False when table creation fails
        """
        db_file_path = os.path.join(job_info.meta_path, 'meta', job_info.copy_id, 'gds_backup_meta.db')
        meta_db = GdsBackupMetaFile(f"sqlite:///{db_file_path}")
        self._gds_backup_meta_db = meta_db
        try:
            meta_db.create_all_table()
        except Exception as err:
            log.error(f"Create all table in sqlite3 err: {err}, {job_info.log_format()}.")
            return False
        change_path_permission(db_file_path, user_name="root", mode=PERMISSION_600)
        return True

    def _check_table_state(self, job_info: JobInfo):
        """
        Check whether the tables to back up exist and cache the query result.

        Side effects: stores the db name and the existing tables on self, and
        reports a warning for tables that do not exist.

        :param job_info: current job information
        :return: True on success, False on failure
        :raises ErrCodeException: when none of the tables exist
        """
        table_info = TableBackup.get_table_info(job_info)
        self._db_name = table_info.db_name
        self._backup_tables = table_info.backup_tables
        if table_info.ret == TableBackupErrCode.TABLE_ALL_NOT_EXIST:
            log.error(f"All table not exists, {job_info.log_format()}.")
            detail = LogDetail(logDetail=DwsErrorCode.ALL_TABLE_NOT_EXIST.value,
                               logTimestamp=int(time.time()),
                               logLevel=DBLogLevel.ERROR)
            raise ErrCodeException(log_detail=detail)
        if table_info.ret != TableBackupErrCode.NO_ERROR:
            log.error(f"Get table info failed, {job_info.log_format()}.")
            return False

        # Missing tables are only a warning, not a failure
        if table_info.not_exist_list:
            self.report_not_exist_table(job_info, table_info.not_exist_list)

        return True

    def _write_table_to_file(self, file_path, table_list):
        """Overwrite *file_path* with one table name per line (plain text)."""
        content = "\n".join(table_list)
        exec_overwrite_file(file_path, content, json_flag=False)
