#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import json
import threading
import pwd
import grp
import time
from queue import Queue

from dws.commons.common import log, mount_bind_path, get_gds_process_param, execute_cmd, umount_all_gds_data_dir
from common.common_models import SubJobDetails, LogDetail
from common.const import RestoreType, SubJobStatusEnum, DBLogLevel
from common.util.check_utils import check_path_in_white_list
from common.common import check_command_injection_exclude_quote, is_clone_file_system
from common.util.cmd_utils import cmd_format
from dws.commons.function_tool import log_start, thread_lock_func, get_register_ip
from dws.commons.error_code import DwsErrorCode
from dws.commons.const import GDSTableStatus, DwsRetEnum, PERMISSION_755
from dws.commons.progress_notify import write_file, report_job_details_comm
from dws.database.db_sqlite import GdsBackupMetaFile
from dws.resource.dws_cluster import DwsCommon
from dws.resource.dws_database import DwsDatabase
from common.file_common import change_path_permission, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file

# Template of one table's entry in the progress file ("status" holds a
# GDSTableStatus value). NOTE(review): not referenced in this chunk —
# presumably used elsewhere; confirm before removing.
GSD_TableInfo = {"name": "", "status": 0}
# gsql output must contain at least this many lines to hold a count value.
MIN_OUT_LINES_COUNT = 4
# Index of the count row within gsql output (second-to-last line).
TOTAL_COUNT_POS = -2
# The progress file is truncated and rewritten on every update.
OPEN_MODE = "w+"
# Serializes concurrent updates of the shared progress file across workers.
UPDATE_LOCK = threading.Lock()


class GDSRestoreWhiteListJob:
    """Per-node preparation step for a GDS table-level restore.

    Builds the mapping from the backed-up (old) file systems to the new copy
    file systems and their table lists, matches them with the GDS processes
    running on this node, bind-mounts the copy data into the GDS data
    directories, and persists the result for the restore sub-job to consume.
    """

    def __init__(self, job_para):
        # job_para: project job-parameter wrapper (copy ids, repo paths,
        # node info, ...).
        self.job_info = job_para
        if self.job_info.get_copy_type() != "s3Archive":
            self.all_repo_data_path = list()
            if not self._get_fs_relation():
                log.error("Get file system map relation ship failed")
                raise Exception("Get file system map relation ship failed")
            if not self._get_db_table_info():
                log.error("Get backup table info failed")
                raise Exception("Get backup table info failed")
            # Keep only file systems that actually have tables to restore.
            relation = dict()
            for item in self.relation_dict:
                item_info = self.relation_dict.get(item, {})
                list_info = item_info.get("table_list", [])
                if not list_info:
                    continue
                relation[item] = item_info
            self.relation_dict = relation

    def save_gds_and_fs_info(self):
        """Persist this node's GDS/file-system mapping into the cache area.

        Mounts the copy data under the local GDS data directories first.
        Returns False on hard errors; returns True both on success and when
        this node simply has nothing usable to contribute (best effort —
        another cluster node may provide the GDS processes instead).
        """
        copy_id = self.job_info.get_source_backup_copy_id()
        cache_path = self.job_info.get_cache_path()
        if not cache_path:
            log.error("Get cache path is none failed")
            return False
        _, cluster_agent, host_agent = self.job_info.get_nodes_info()
        host_ip = get_register_ip(cluster_agent, host_agent)
        if not host_ip:
            log.error(f"Get host ip failed, {self.job_info.log_format()}.")
            return False
        gds_info_path = os.path.join(cache_path[0], "tmp", copy_id, "gds", f"gds_{host_ip}.txt")
        if not os.path.exists(os.path.dirname(gds_info_path)):
            exec_mkdir_cmd(os.path.dirname(gds_info_path))
        ret, gds_list_info = self._save_gds_info(host_ip, cache_path[0], copy_id)
        if not ret or not gds_list_info:
            # Not a job-level error: another node may still provide GDS.
            log.warning("Current node get gds info failed")
            return True
        umount_all_gds_data_dir()
        if self.job_info.get_copy_type() != "s3Archive":
            if len(gds_list_info) < len(self.relation_dict):
                # Fewer GDS processes than file systems: this node cannot
                # serve the restore, but another node still might.
                log.warning(f"Current node gds number:{len(gds_list_info)}"
                            f" lower than fs num: {len(self.relation_dict)} "
                            f"can not execute restore job")
                return True
            elif not self._mount_bind_data_path(gds_list_info):
                log.error("Mount gds data path failed")
                return False
        else:
            if not self._prepare_archive_map_relation(gds_list_info):
                log.error(f"Failed to prepare archive map relation.")
                return False
        if self.job_info.get_copy_type() == "s3Archive":
            write_info = {"gds_info": gds_list_info}
        else:
            write_info = self.relation_dict
        try:
            write_file(gds_info_path, json.dumps(write_info))
        except Exception as ex:
            log.error(f"Write GDS info file failed exception: {ex}")
            return False
        return True

    @log_start()
    def _get_all_repo_data_path(self):
        """Collect {fs_id, data_path} for every data repository of the copy."""
        data_repo_info = self.job_info.get_all_data_repo()
        copy_id = self.job_info.get_source_backup_copy_id()
        for item_repo in data_repo_info:
            fs_id = item_repo.get("extendInfo", {}).get("fsId", "")
            if not fs_id:
                log.warning("Data repository extendInfo no fs id")
                continue
            paths = item_repo.get("path", [""])
            if not paths:
                log.warning("Data repository path is null")
                continue
            data_path = os.path.join(paths[0], "data", copy_id)
            path_object = {"fs_id": fs_id, "data_path": data_path}
            self.all_repo_data_path.append(path_object)

    @log_start()
    def _get_fs_relation(self):
        """Map each old (backed-up) fs id to the matching new repo data path.

        :return: True when every relation entry was matched, False otherwise
        """
        self._get_all_repo_data_path()
        self.relation_dict = dict()
        if self.job_info.get_copy_type() != "tapeArchive":
            relation_info = self.job_info.get_fs_relation()
        else:
            # Tape archives carry the relation in a downloaded metadata file.
            relation_info = self._get_archive_fs()
        for relation_item in relation_info:
            new_fs_id = relation_item.get("newFsId", "")
            if not new_fs_id:
                log.error(f"Get relation new fs id failed : {relation_item}")
                return False
            for item in self.all_repo_data_path:
                if item.get("fs_id", "") == new_fs_id:
                    item["table_list"] = list()
                    self.relation_dict[relation_item.get("oldFsId")] = item
                    break
        if len(relation_info) != len(self.relation_dict):
            log.error(f"fs relation num: {len(relation_info)} not equal old fs map num: {len(self.relation_dict)}")
            return False
        return True

    @log_start()
    def _get_db_table_info(self):
        """Load backed-up table records from the sqlite meta database and
        attach them to the matching file-system entries in relation_dict.

        :return: True on success, False when the db cannot be read
        """
        copy_id = self.job_info.get_source_backup_copy_id()
        meta_path = self.job_info.get_metadata_path()
        if not copy_id or not meta_path:
            log.error(f"Copy id: {copy_id}  or meta path: {meta_path} is none")
            return False
        db_file_path = os.path.join(meta_path[0], 'meta', copy_id, 'gds_backup_meta.db')
        try:
            self._gds_backup_meta_db = GdsBackupMetaFile(f"sqlite:///{db_file_path}")
        except Exception:
            log.error(f"Get sqlite db object err, {self.job_info.log_format()}.")
            return False
        backup_tables_info = self._gds_backup_meta_db.query_all_records()
        if not backup_tables_info:
            log.error("Get table info failed")
            return False
        for item in backup_tables_info:
            fs_info = self.relation_dict.get(item.FS_ID)
            log.info(f"Item fs id :{item.FS_ID}")
            if not fs_info:
                # Record belongs to an old fs with no mapped new file system.
                log.error(f"Old fs id: {item.FS_ID} Data filesystem relation ship not the new filesystem")
                continue
            fs_info["ip_dir"] = item.AGENT_IP
            fs_info["table_list"].append(item.to_dict())

        return True

    @log_start()
    def _mount_bind_data_path(self, gds_list_info):
        """Bind-mount each file system's copy data into a GDS data dir.

        :param gds_list_info: list of local GDS process descriptors
        :return: True on success, False on any chmod/mount failure
        """
        gds_index = 0
        for fs_key in self.relation_dict:
            if not self.relation_dict.get(fs_key, {}).get("table_list", ""):
                log.warning("Table list is none")
                continue
            src_path = self.relation_dict.get(fs_key, {}).get("data_path", "")
            if not src_path:
                log.error("Get data path is none")
                return False
            # Pair this file system with the next unused GDS process.
            self.relation_dict.get(fs_key)["gds_info"] = gds_list_info[gds_index]
            des_path = gds_list_info[gds_index].get("dataDir")
            ip_dir = self.relation_dict.get(fs_key).get("ip_dir", "")
            src_path = os.path.join(src_path, ip_dir)
            return_code, _, err_str = execute_cmd(cmd_format("chmod -R 755 {}", src_path))
            if return_code != DwsRetEnum.SUCCESS:
                log.error(f"Fail to mode for {src_path} for {err_str}")
                return False
            if not mount_bind_path(src_path, des_path):
                log.error("Mount bind data path failed")
                return False
            gds_index += 1
        return True

    def _save_gds_info(self, host_ip, cache_path, copy_id):
        """Query local GDS processes and write count_<ip>.txt recording the
        GDS and file-system counts (used later for mismatch error reports).

        :return: (ret, gds_list_info) as returned by get_gds_process_param
        """
        ret, gds_list_info = get_gds_process_param()
        if not ret or not gds_list_info:
            log.warning("Current node get gds info failed")
            gds_list_info = []
        count_info_path = os.path.join(cache_path, "tmp", copy_id, "gds", f"count_{host_ip}.txt")
        gds_length = len(gds_list_info)
        # Archive copies always map to a single logical file system.
        fs_length = 1 if self.job_info.get_copy_type() == "s3Archive" else len(self.relation_dict)
        write_info = {"GDS_COUNT": gds_length, "FS_COUNT": fs_length}
        try:
            write_file(count_info_path, json.dumps(write_info))
        except Exception as ex:
            # Best effort: a failed count file only degrades error reporting.
            log.error(f"Write count info file failed exception: {ex}")
        return ret, gds_list_info

    def _prepare_archive_map_relation(self, gds_list_info):
        """Bind-mount the archive copy's cache data dir under the first GDS
        process's data directory.

        :return: True on success, False on validation/chmod/mount failure
        """
        gds_info = gds_list_info[0]
        des_path = gds_info.get("dataDir")
        cache_path = self.job_info.get_cache_path()
        copy_id = self.job_info.get_source_backup_copy_id()
        src_path = os.path.join(cache_path[0], "data", copy_id)
        if not check_path_in_white_list(src_path):
            log.error(f"Cache path: {src_path} is invalid")
            return False
        return_code, _, err_str = execute_cmd(cmd_format("chmod -R 755 {}", src_path))
        if return_code != DwsRetEnum.SUCCESS:
            log.error(f"Fail to mode for {src_path} for {err_str}")
            return False
        if not mount_bind_path(src_path, des_path):
            log.error("Mount bind data path failed")
            return False
        return True

    def _get_archive_fs(self):
        """Read the file-system relationship list downloaded from the tape
        archive metadata.

        :return: list of relation dicts; returns False when no metadata path
            is available — NOTE(review): the caller iterates the result, so
            False would raise TypeError; confirm this path cannot occur.
        """
        metadata_path, _ = self.job_info.get_available_path()
        if not metadata_path:
            log.error(f"No cache/metadata path can be read or written metadata_path:{metadata_path}")
            return False
        temp_download_path = os.path.join(metadata_path, "meta",
                                          self.job_info.get_copy_id(), "archiveDownload")
        log.debug(f"Download path: {temp_download_path}")
        file_path = os.path.join(temp_download_path, "filesystemRelationship.txt")

        with open(file_path, "r") as file:
            fs_info = json.loads(file.read())
        return fs_info.get("relations", [])


class GDSRestoreJob:

    def __init__(self, job_para, is_progress=False):
        """Collect restore parameters and prepare the table work queue.

        :param job_para: project job-parameter wrapper
        :param is_progress: True when instantiated only to report progress
        :raises Exception: when the copy cannot be restored by this job
        """
        self.job_info = job_para
        self.is_progress = is_progress
        self.work_pool = []
        self.concurrent = 0
        self.table_queue = Queue()
        self.restore_type = self.job_info.get_restore_type()
        self.archive_table_list = []
        self.restore_table_data = dict()
        self.error_code = 0
        self.max_gds_count = 0
        self.fs_count = 0
        self.gds_host_ip = ""
        log.debug(f"The restore type is {self.restore_type}.")
        if self.restore_type == RestoreType.INVALID_RESTORE_TYPE:
            raise Exception(f"Can not apply restore type {self.restore_type}")
        if self._get_max_gds_process_info():
            if not self._get_job_table_info():
                raise Exception("Copy can not apply the job restore")
        else:
            # Record why GDS info was unavailable so the progress report can
            # surface the matching error label later.
            if self.max_gds_count:
                self.error_code = DwsErrorCode.GDS_CNT_NOT_MATCH
            else:
                self.error_code = DwsErrorCode.GDS_PROCESS_NOT_EXIST
            if not self._create_table_process_info([]):
                raise Exception("Copy can not apply the job restore")
        self._create_schemata_lock = threading.Lock()

    @staticmethod
    def _get_file_name(file):
        """Extract the host-ip token from a name like 'gds_<ip>.txt'.

        Returns the piece after the first underscore of the file stem, or ""
        when the stem contains no underscore.
        """
        stem = os.path.splitext(file)[0]
        parts = stem.split("_")
        return parts[1] if len(parts) > 1 else ""

    @log_start()
    def run_exec(self):
        """Run the restore sub-job: fix data ownership, adjust SQL file
        permissions for clone file systems, then start the worker pool."""
        self._change_data_owner_to_database_user()
        restore_param = self.job_info.get_restore_param()
        if is_clone_file_system(restore_param):
            self._change_sql_file_mode()
        self._create_work_pool()

    @log_start()
    def get_process(self):
        """Read the shared progress file and report sub-job status/progress.

        :return: SubJobDetails carrying task status, progress percentage and
            an optional LogDetail entry
        """
        job_detail = SubJobDetails(taskId=self.job_info.get_main_job_id(), subTaskId=self.job_info.get_subtask_id(),
                                   progress=100, taskStatus=SubJobStatusEnum.FAILED.value)
        if not self.job_info.get_cache_path():
            log.error("Cache path is None can not restore")
            return job_detail
        restore_file = os.path.join(self.job_info.get_cache_path()[0],
                                    f"gds_restore_process_{self.job_info.get_main_job_id()}")
        try:
            with open(restore_file, "r") as f:
                tables_dict = json.loads(f.read())
        except Exception as e:
            log.error(f"Read process file failed table, exception: {e}")
            return job_detail
        # Fix: the original called len(tables_dict.get("table_info")) which
        # raises TypeError when the key is absent; default to [] so a
        # malformed progress file reports FAILED instead of crashing.
        table_items = tables_dict.get("table_info", [])
        total_count = len(table_items)
        finish_count = 0
        failed_count = 0
        self.error_code = tables_dict.get("error_code", 0)
        self.max_gds_count = tables_dict.get("gds_max_count", 0)
        self.fs_count = tables_dict.get("fs_count", 0)
        self.gds_host_ip = tables_dict.get("gds_host_ip", "")
        for item in table_items:
            if item.get("status") == GDSTableStatus.RESTORE_SUCCESS:
                finish_count = finish_count + 1
            elif item.get("status") == GDSTableStatus.RESTORE_FAILED:
                # One failed table fails the sub-job; stop counting.
                failed_count += 1
                break
        log_detail = self._construct_log_detail(failed_count, finish_count, total_count)
        if not log_detail:
            job_detail.log_detail = log_detail
        else:
            job_detail.log_detail = [log_detail]
        if self.error_code or not total_count or failed_count:
            job_detail.task_status = SubJobStatusEnum.FAILED.value
            return job_detail
        # total_count > 0 is guaranteed by the guard above.
        progress = finish_count / total_count
        if progress == 1:
            job_detail.task_status = SubJobStatusEnum.COMPLETED.value
            return job_detail
        else:
            job_detail.task_status = SubJobStatusEnum.RUNNING.value
            job_detail.progress = int(progress * 100)
            return job_detail

    @log_start()
    def exec(self):
        """Worker-thread entry point: drain the shared table queue, restoring
        one table per item; failures are logged and recorded per item so one
        bad table does not stop the worker."""
        from queue import Empty  # local import: only Queue is imported at module level
        log.info("Start run restore table data thread ")
        while True:
            # Fix: the original checked empty() and then called blocking
            # get(); with multiple workers another thread can consume the
            # last item between the two calls, leaving this worker blocked
            # forever. get_nowait() makes check-and-take atomic.
            try:
                table_info = self.table_queue.get_nowait()
            except Empty:
                break
            try:
                self._exec_item(table_info)
            except Exception as e:
                log.error(f"exec item: {table_info} failed exception: {e}")

    @log_start()
    def _exec_item(self, table_info):
        """Restore one table: (re)create it, load its data, and record the
        per-table outcome in the shared progress file."""
        log.debug(f"Restore info is {table_info}")
        schema = table_info.get("SCHEMA_NAME", "")
        table = table_info.get("TABLE_NAME", "")
        item_name = "{}.{}".format(schema, table)
        if not self._create_table(table_info):
            log.error(f'Table {item_name} create failed')
            self._update_process_file(GDSTableStatus.RESTORE_FAILED, item_name)
            return
        if not self._restore_table_data(table_info):
            log.error(f'Restore table {item_name} data failed')
            self._update_process_file(GDSTableStatus.RESTORE_FAILED, item_name)
            return
        self._update_process_file(GDSTableStatus.RESTORE_SUCCESS, item_name)

    @log_start()
    def _get_max_gds_process_info(self):
        """Load the GDS/file-system mapping written by the prerequisite job.

        Looks for a 'gds_<ip>.txt' file under the cache area; when none
        exists, scans the count files to fill in the error context instead.
        """
        cache_path = self.job_info.get_cache_path()
        copy_id = self.job_info.get_source_backup_copy_id()
        gds_info_path = os.path.join(cache_path[0], "tmp", copy_id, "gds")
        file_list = os.listdir(gds_info_path)
        if not file_list:
            log.error("Have not gds node can execute restore job")
            return False
        file_path = next(
            (os.path.join(gds_info_path, name) for name in file_list if "gds" in name), "")
        if not file_path:
            # No node reported usable GDS info; gather the count files so the
            # progress report can explain the mismatch.
            self._find_max_gds_count(gds_info_path, file_list)
            log.error("Failed to get file path.")
            return False
        log.debug(f"File info {file_path}")
        with open(file_path, "r") as file:
            self.restore_table_data = json.loads(file.read())
        return True

    @log_start()
    def _find_max_gds_count(self, gds_info_path, file_list):
        """Scan per-node count files, remembering the node with the most GDS
        processes and the reported file-system count (for error reporting)."""
        if not gds_info_path or not file_list:
            return
        for name in file_list:
            with open(os.path.join(gds_info_path, name), "r") as handle:
                count_data = json.loads(handle.read())
            gds_count = count_data.get("GDS_COUNT", 0)
            if gds_count > self.max_gds_count:
                self.max_gds_count = gds_count
                self.gds_host_ip = self._get_file_name(name)
            self.fs_count = count_data.get("FS_COUNT", 0)

    @log_start()
    def _get_job_table_info(self):
        """Resolve the target database name and the list of tables to
        restore from the job parameters, then validate and queue them."""
        if self.is_progress:
            return True
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False
        dws = DwsDatabase(usr, env_path)
        table_list = []
        if self.restore_type == RestoreType.NORMAL_RESTORE:
            table_list = self._get_backuped_tables()
            self.db_name = self.job_info.get_normal_restore_tables()
        elif self.restore_type == RestoreType.FINE_GRAINED_RESTORE:
            self.db_name, table_list, _ = self.job_info.get_fine_grained_restore_tables()
        if not self.db_name or not table_list:
            log.error(f"Get db name or table list failed, {self.job_info.log_format()}.")
            return False
        if not dws.is_db_exists(self.db_name):
            log.error(f"Retore target have not db {self.db_name}")
            self._report_not_exit_database()
            return False
        # All three preparation steps must succeed for the job to run.
        return (self._check_tables_effectiveness(table_list)
                and self._create_table_process_info(table_list)
                and self._init_table_queue())

    @log_start()
    def _calculation_concurrent(self):
        """Size the worker pool: for each file system, use the smaller of the
        GDS concurrency and the number of tables flagged for restore."""
        for key in self.restore_table_data:
            entry = self.restore_table_data.get(key, {})
            tables = entry.get("table_list")
            if not tables:
                continue
            flagged = sum(1 for tbl in tables if tbl.get("RESTORE", "") == "1")
            gds_concurrent = entry.get("gds_info", {}).get("concurrent")
            self.concurrent += min(gds_concurrent, flagged)
        log.debug(f"Restore work concurrent is {self.concurrent}")

    @log_start()
    def _create_work_pool(self):
        """Spawn the worker threads that drain the table queue, then block
        until every worker has finished."""
        self._calculation_concurrent()
        for _ in range(self.concurrent):
            thread = threading.Thread(target=self.exec)
            thread.start()
            self.work_pool.append(thread)
        log.info(f"Work pool size is {len(self.work_pool)}")
        for thread in self.work_pool:
            thread.join()

    @log_start()
    def _create_table_process_info(self, table_list):
        """Write the initial per-table progress file, unless one already
        exists from a previous attempt.

        :param table_list: iterable of 'schema.table' names
        :return: True on success or when the file already exists
        """
        cache_paths = self.job_info.get_cache_path()
        if not cache_paths:
            log.error("Cache path is None can not restore")
            return False
        restore_file = os.path.join(
            cache_paths[0], f"gds_restore_process_{self.job_info.get_main_job_id()}")
        if os.path.exists(restore_file):
            # Keep the existing file so a retried sub-job resumes from the
            # recorded per-table statuses.
            return True
        tables_info = {
            "table_info": [
                {"name": name, "status": GDSTableStatus.RESTORE_INIT} for name in table_list
            ],
            "error_code": self.error_code,
            "gds_max_count": self.max_gds_count,
            "fs_count": self.fs_count,
            "gds_host_ip": self.gds_host_ip
        }
        if not exec_overwrite_file(restore_file, tables_info):
            log.error(f"Write Restore table list file failed.")
            return False
        return True

    @log_start()
    @thread_lock_func(UPDATE_LOCK)
    def _update_process_file(self, status, table_name):
        """Update one table's status in the shared progress file.

        Serialized via UPDATE_LOCK since multiple worker threads call this.
        """
        if not self.job_info.get_cache_path():
            log.error("Cache path is None can not restore")
            return
        restore_file = os.path.join(self.job_info.get_cache_path()[0],
                                    f"gds_restore_process_{self.job_info.get_main_job_id()}")
        try:
            with open(restore_file, "r") as f:
                tables_dict = json.loads(f.read())
        except Exception as e:
            log.error(f"Write process file failed table {table_name}, exception: {e}")
            return
        # Set the status on every matching entry; all entries are kept.
        for entry in tables_dict.get("table_info", [""]):
            if entry.get("name", "") == table_name:
                entry["status"] = status
        try:
            with open(restore_file, OPEN_MODE) as f:
                f.write(json.dumps(tables_dict))
        except Exception as e:
            log.error(f"Write failed process exception {e}")
            return
        return

    @log_start()
    def _construct_log_detail(self, failed_count, finish_count, total_count):
        """Build the LogDetail record for progress reporting, or None when
        host/auth information is unavailable or no condition matches."""
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.warning("Get usr or env path failed")
            return None
        _, cluster_agent, host_agent = self.job_info.get_nodes_info()
        host_ip = get_register_ip(cluster_agent, host_agent)
        if not host_ip:
            log.warning(f"Get host ip failed, {self.job_info.log_format()}.")
            return None
        if self.error_code == DwsErrorCode.GDS_CNT_NOT_MATCH:
            # Best node's GDS count is lower than the file-system count.
            return LogDetail(logDetailParam=[f"{self.gds_host_ip}",
                                            f"{self.max_gds_count}",
                                            f"{self.fs_count}"],
                             logTimestamp=int(time.time()), logLevel=3,
                             logDetail=DwsErrorCode.GDS_CNT_NOT_MATCH)
        if self.error_code == DwsErrorCode.GDS_PROCESS_NOT_EXIST:
            return LogDetail(logTimestamp=int(time.time()), logLevel=DBLogLevel.ERROR,
                             logDetail=DwsErrorCode.GDS_PROCESS_NOT_EXIST)
        if failed_count:
            return LogDetail(logInfo="plugin_restore_subjob_fail_label",
                             logInfoParam=[f"{self.job_info.get_subtask_id()}"],
                             logTimestamp=int(time.time()), logLevel=3,
                             logDetail=None)
        if finish_count <= total_count:
            return LogDetail(logInfo="dws_restore_subjob_gds_label",
                             logInfoParam=[host_ip,
                                           f"{total_count - finish_count}",
                                           f"{finish_count}"],
                             logTimestamp=int(time.time()), logLevel=1)
        return None

    @log_start()
    def _check_tables_effectiveness(self, table_list):
        """Validate the requested tables: for s3 archive copies build the
        table list directly; otherwise flag each table found in the repo."""
        is_archive = self.job_info.get_copy_type() == "s3Archive"
        for table_item in table_list:
            if is_archive:
                schema_name, table_name = table_item.split(".")[0], table_item.split(".")[1]
                self.archive_table_list.append(
                    {"SCHEMA_NAME": schema_name, "TABLE_NAME": table_name, "RESTORE": "1"})
            elif not self._check_table_in_repo(table_item):
                log.error(f"Table: {table_item} not found in repo can not restore")
                return False
        if is_archive:
            # Archive copies use a single synthetic file-system entry backed
            # by the first GDS process.
            gds_info = self.restore_table_data.get("gds_info")[0]
            self.restore_table_data = {
                "archive_table_list": {"table_list": self.archive_table_list, "gds_info": gds_info}
            }
        return True

    @log_start()
    def _check_table_in_repo(self, table_item):
        """Look up 'schema.table' in the backup metadata; on a hit, flag the
        record for restore and return True."""
        schema_name, table_name = table_item.split(".")[0], table_item.split(".")[1]
        for key in self.restore_table_data:
            tables = self.restore_table_data.get(key, {}).get("table_list", [])
            if not tables:
                log.error("Repo table list info is none")
                return False
            for record in tables:
                if (record.get("TABLE_NAME", "") == table_name
                        and record.get("SCHEMA_NAME", "") == schema_name):
                    record["RESTORE"] = "1"
                    return True
        log.error(f"Have not table: {table_item} info file")
        return False

    @log_start()
    def _init_table_queue(self):
        """Fill the worker queue with every table flagged for restore,
        attaching each file system's GDS endpoint info to the item."""
        if not self.restore_table_data:
            log.error("Restore table data is none")
            return False
        for entry in self.restore_table_data.values():
            table_list = entry.get("table_list", [])
            if not table_list:
                log.warning("Get restore table list is empty")
                continue
            gds_info = entry.get("gds_info", {})
            if not gds_info:
                log.error("Get restore gds_info failed")
                return False
            for table in table_list:
                if table.get("RESTORE") == "1":
                    table["gds_info"] = gds_info
                    self.table_queue.put(table)
        log.debug(f"Table queue is: {self.table_queue}")
        return True

    @log_start()
    def _create_schemata(self, schema_name):
        """Ensure the target schema exists in the restore database.

        Queries information_schema.schemata for the (quote-stripped) name and
        issues CREATE SCHEMA when the count is zero.

        :param schema_name: possibly double-quoted schema name
        :return: True when the schema exists or was created, False otherwise
        """
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False
        common_obj = DwsCommon(usr, env_path, self.job_info.get_pid())
        # Strip embedded double quotes so the name matches the catalog entry.
        schema_name = schema_name.replace('"', '')
        # NOTE(review): schema_name is interpolated into the SQL text; values
        # come from backup metadata, but confirm they cannot contain single
        # quotes before trusting this against hostile input.
        sql_cmd = f"select count(*) from information_schema.schemata where schema_name = '{schema_name}';"
        ret, out_lines = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
        log.debug(f"Execute sql command: {sql_cmd}")

        if not ret:
            log.error(f"Select schema {schema_name} failed, {self.job_info.log_format()}.")
            return False
        log.debug(f"Out put is {out_lines}")
        if len(out_lines) < MIN_OUT_LINES_COUNT:
            log.error(f"Sql cmd out put line count lower {MIN_OUT_LINES_COUNT}")
            return False
        # out_lines[TOTAL_COUNT_POS] holds the count value in gsql output.
        if not int(out_lines[TOTAL_COUNT_POS].strip()):
            sql_cmd = f"create schema \"{schema_name}\";"
            ret, out_lines = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
            log.debug(f"Execute sql command: {sql_cmd}")
            if not ret:
                log.error(f"Create schemata {schema_name} failed, {self.job_info.log_format()}.")
                return False
        return True

    @log_start()
    def _create_table(self, table_info):
        """Create the restore target table, dropping any existing one first.

        Serializes schema creation across worker threads, checks pg_tables
        for an existing table of the same name (dropping it if present), then
        replays the table's backup DDL file.

        :param table_info: dict with at least SCHEMA_NAME and TABLE_NAME keys
        :return: True on success, False on any failure
        """
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False
        common_obj = DwsCommon(usr, env_path, self.job_info.get_pid())
        schema_name = table_info.get("SCHEMA_NAME", "")
        table_name = table_info.get("TABLE_NAME", "")
        # Fix: use the lock as a context manager so it is always released.
        # The original acquire/release pair left the lock held forever if
        # _create_schemata raised, deadlocking every other worker thread.
        with self._create_schemata_lock:
            schema_ok = self._create_schemata(schema_name)
        if not schema_ok:
            log.error("Create schema failed")
            return False
        schema_name = schema_name.replace('"', '')
        table_name = table_name.replace('"', '')
        sql_cmd = f"select count(*) from pg_tables where schemaname='{schema_name}' and tablename='{table_name}';"
        ret, out_lines = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
        log.debug(f"Execute sql commd: {sql_cmd}")
        if not ret:
            log.error(f"Select table {table_name} failed, {self.job_info.log_format()}.")
            return False
        if len(out_lines) < MIN_OUT_LINES_COUNT:
            log.error(f"Sql cmd out put line count lower {MIN_OUT_LINES_COUNT}")
            return False
        # Non-zero count: a table of the same name already exists — drop it
        # so the backup DDL can recreate it cleanly.
        if int(out_lines[TOTAL_COUNT_POS].strip()):
            sql_cmd = f"drop table \"{schema_name}\".\"{table_name}\";"
            ret, out_lines = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
            if not ret:
                log.error(f"Drop table {table_name} failed, {self.job_info.log_format()}.")
                return False
        return self._exec_create_sql_file(schema_name, table_name)

    @log_start()
    def _change_sql_file_mode(self):
        """Open up the meta directories and hand the gsdump tree to the
        database user so gaussdb can read the backup SQL files."""
        if self.job_info.get_copy_type() == "s3Archive":
            return True
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False
        meta_path = self.job_info.get_metadata_path()
        copy_id = self.job_info.get_source_backup_copy_id()
        meta_dir = os.path.join(meta_path[0], 'meta')
        if check_command_injection_exclude_quote(meta_dir):
            log.error(f"Meta path: {meta_dir} is invailed")
            return False
        copy_id_dir = os.path.join(meta_dir, copy_id)
        gds_data_dir = os.path.join(meta_path[0], 'meta', copy_id, "gsdump")
        try:
            group_id = pwd.getpwnam(str(usr)).pw_gid
        except Exception as err:
            log.error(f"Get database user failed exception: {err}")
            return False
        database_user_group = grp.getgrgid(group_id).gr_name
        # 755 on both meta directories, then chown the dump tree recursively.
        for path in (meta_dir, copy_id_dir):
            if not change_path_permission(path, mode=PERMISSION_755):
                log.error(f"Fail to mode for {path}.")
                return False
        if not exec_lchown_dir_recursively(gds_data_dir, usr, database_user_group):
            log.error(f"Fail to change user/group for {gds_data_dir}.")
            return False
        return True

    @log_start()
    def _restore_table_data(self, table_info):
        """Load one table's data through a GDS foreign table.

        Creates a temporary foreign table pointing at the GDS-served dump
        file, inserts from it into the real table, then drops it.

        :param table_info: dict with TABLE_NAME, SCHEMA_NAME and gds_info
        :return: True on success, False on any SQL failure
        """
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False
        common_obj = DwsCommon(usr, env_path, self.job_info.get_pid())
        table_name = table_info.get("TABLE_NAME")
        schema_name = table_info.get("SCHEMA_NAME")
        # The job id suffix keeps the foreign-table name unique per job; the
        # dump file on disk uses the un-suffixed name.
        foreign_table_name = f"foreign_{schema_name}_{table_name}" \
                             f"{self.job_info.get_main_job_id().replace('-', '_')}".replace('"', '')
        foreign_data_name = f"foreign_{schema_name}_{table_name}".replace('"', '')
        log.debug(f"Restore table name [{table_name}], {self.job_info.log_format()}.")
        gds_ip_port = table_info.get("gds_info").get("ipPort")
        # Create the foreign table pointing at the GDS-served dump file.
        sql_cmd = f"CREATE FOREIGN TABLE \"{foreign_table_name}\"(like \"{schema_name}\".\"{table_name}\") " \
                  f"SERVER gsmpp_server OPTIONS (LOCATION \'gsfs://{gds_ip_port}/{foreign_data_name}.dat.0\', " \
                  f"FORMAT \'text\', DELIMITER \',\', EOL \'0x0a\');"
        ret, _ = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
        log.debug(f"Execute sql command: {sql_cmd}")
        if not ret:
            log.error(f"Create foreign table failed, {self.job_info.log_format()}.")
            return False

        # Insert the data into the real table.
        sql_cmd = f"INSERT INTO \"{schema_name}\".\"{table_name}\" select * from \"{foreign_table_name}\";"
        ret, _ = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
        log.debug(f"Execute sql command: {sql_cmd}")
        if not ret:
            log.error(f"Insert into foreign table failed, {self.job_info.log_format()}.")
            return False
        # Drop the temporary foreign table.
        sql_cmd = f"DROP FOREIGN TABLE \"{foreign_table_name}\";"
        ret, _ = common_obj.exec_gaussdb_sql_cmd(self.db_name, sql_cmd)
        log.debug(f"Execute sql command: {sql_cmd}")
        if not ret:
            log.error(f"Drop foreign table failed, {self.job_info.log_format()}.")
            return False
        return True

    def _report_not_exit_database(self):
        """Report that the database required by the restore targets does not exist.

        Sends a FAILED sub-job detail (progress 100) with the
        NO_RESTORE_TARGET_DATABASE error code to the report channel.
        """
        sub_task_id = self.job_info.get_subtask_id()
        log_detail = LogDetail(
            logInfo="plugin_restore_subjob_fail_label",
            logInfoParam=[f"{sub_task_id}"],
            logTimestamp=int(time.time()),
            logLevel=3,
            logDetail=DwsErrorCode.NO_RESTORE_TARGET_DATABASE,
            logDetailParam=[])
        job_detail = SubJobDetails(
            taskId=self.job_info.get_main_job_id(),
            subTaskId=sub_task_id,
            taskStatus=SubJobStatusEnum.FAILED,
            progress=100,
            logDetail=[log_detail])
        ret = report_job_details_comm(sub_task_id, job_detail.dict(by_alias=True))
        log.info(f"Report not exits database {ret} when restore, {self.job_info.log_format()}.")

    def _report_not_exist_tables(self, original_tables: list, exist_tables: list):
        """Report tables that were selected for restore but never backed up.

        Compares the user's selection with the set actually present in the
        backup; if any are missing, raises a WARN-level job detail naming
        them. No report is sent when nothing is missing.

        Args:
            original_tables: tables chosen for the table-level restore.
            exist_tables: tables actually recorded in the backup copy.
        """
        missing_tables = list(set(original_tables) - set(exist_tables))
        if not missing_tables:
            return
        sub_task_id = self.job_info.get_subtask_id()
        log_detail = LogDetail(
            logInfo="dws_restore_table_not_exist_label",
            logInfoParam=[','.join(missing_tables)],
            logTimestamp=int(time.time()),
            logLevel=DBLogLevel.WARN)
        job_detail = SubJobDetails(
            taskId=self.job_info.get_main_job_id(),
            subTaskId=sub_task_id,
            taskStatus=SubJobStatusEnum.RUNNING,
            progress=10,
            logDetail=[log_detail])
        ret = report_job_details_comm(sub_task_id, job_detail.dict(by_alias=True))
        log.info(f"Report not exits tables ret [{ret}] when restore, {self.job_info.log_format()}.")

    def _get_backuped_tables(self):
        """Return every table recorded in the copy's backup meta database.

        Locates the sqlite meta file — under the cache path for s3Archive
        copies, under the metadata path otherwise — and reads all records.

        Returns:
            list: entries formatted as "<schema>.<table>"; empty list when
            the sqlite meta file cannot be opened.
        """
        copy_id = self.job_info.get_source_backup_copy_id()
        if self.job_info.get_copy_type() == "s3Archive":
            cache_path = self.job_info.get_cache_path()
            db_file_path = os.path.join(cache_path[0], 'meta', copy_id, 'objectmeta', 'gds_backup_meta.db')
        else:
            meta_path = self.job_info.get_metadata_path()
            db_file_path = os.path.join(meta_path[0], 'meta', copy_id, 'gds_backup_meta.db')

        try:
            tmp_db_obj = GdsBackupMetaFile(f"sqlite:///{db_file_path}")
        except Exception:
            log.error(f"Get sqlite db object err, {self.job_info.log_format()}.")
            return []
        # Comprehension instead of a manual append loop (PERF401).
        backuped_list = [
            f"{item.SCHEMA_NAME}.{item.TABLE_NAME}"
            for item in tmp_db_obj.query_all_records()
        ]
        log.info(f"Get all backuped tables success, {self.job_info.log_format()}.")
        return backuped_list

    def _exec_create_sql_file(self, schema_name, table_name):
        """Execute the per-table CREATE TABLE sql file against the database.

        Resolves the dumped "<schema>.<table>.sql" file — from the cache
        path for s3Archive copies, from the metadata gsdump directory
        otherwise — and runs it through gsql as the target cluster user.

        Args:
            schema_name: schema of the table being restored.
            table_name: name of the table being restored.

        Returns:
            bool: True when the sql file ran successfully; a falsy value
            when auth info, copy id, the file, or the execution fails.
        """
        usr, env_path = self.job_info.get_target_cluster_auth_info()
        if not usr or not env_path:
            log.error("Get usr or env path failed")
            return False

        copy_id = self.job_info.get_source_backup_copy_id()
        if not copy_id:
            log.error("Meta path or copy id is invalid")
            return False
        common_obj = DwsCommon(usr, env_path, self.job_info.get_pid())

        sql_name = f"{schema_name}.{table_name}.sql"
        is_archive = self.job_info.get_copy_type() == "s3Archive"
        if is_archive:
            base_paths = self.job_info.get_cache_path()
            if not base_paths:
                return False
            create_table_sql_file = os.path.join(
                base_paths[0], "meta", copy_id, "objectmeta", f"{sql_name}")
        else:
            base_paths = self.job_info.get_metadata_path()
            create_table_sql_file = os.path.join(
                base_paths[0], 'meta', copy_id, "gsdump", f"{sql_name}")
        if not os.path.exists(create_table_sql_file):
            log.error(f"Create table sql file: {create_table_sql_file} not exist")
            return False
        ret, _ = common_obj.exec_gaussdb_sql_file(self.db_name, create_table_sql_file)
        if not ret:
            log.error(f"Create table: {table_name} failed cannot restore data, {self.job_info.log_format()}")
            # Preserve the raw falsy result rather than normalizing to False.
            return ret
        log.info(f"Create tables success, {self.job_info.log_format()}.")
        return True

    def _change_data_owner_to_database_user(self):
        """Hand the cache path over to the target database user.

        Opens the cache directory (and its parent) to mode 755 and
        recursively chowns it to the database user's primary group so the
        cluster user can read/write restore data there.

        Returns:
            bool: True on success; False when the user lookup, path
            validation, chmod or chown fails.
        """
        user, _ = self.job_info.get_target_cluster_auth_info()
        if not user:
            log.error("Get database user failed")
            return False
        cache_path = self.job_info.get_cache_path()[0]
        if cache_path == "":
            log.error(f"No cache path can be read or written. cache_path: {cache_path}, "
                      f"main task:{self.job_info.get_main_job_id()}")
            return False
        if not check_path_in_white_list(cache_path):
            log.error(f"Cache path:{cache_path} is invaild")
            return False
        log.debug(f"Cache path {cache_path}")
        change_path_permission(os.path.dirname(cache_path), mode=PERMISSION_755)
        # pwd/grp lookups raise KeyError for an unknown user/gid; guard them
        # like _change_sql_file_mode does instead of crashing the sub job.
        try:
            group_id = pwd.getpwnam(str(user)).pw_gid
            database_user_group = grp.getgrgid(group_id).gr_name
        except Exception as err:
            log.error(f"Get database user failed exception: {err}")
            return False
        # Adjust permissions, then ownership.
        if not change_path_permission(cache_path, mode=PERMISSION_755):
            log.error(f"Fail to change mod for {cache_path}. main task:{self.job_info.get_main_job_id()}")
            return False
        if not exec_lchown_dir_recursively(cache_path, user, database_user_group):
            log.error(f"Fail to change user/group for {cache_path}. "
                      f"main task:{self.job_info.get_main_job_id()}")
            return False
        return True
