#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
import uuid
from enum import Enum

from dws.commons.common import log, check_path_valid, copy_file_into_sandbox, select_available_port, \
    construct_roach_param, has_uppercase
from common.common_models import LogDetail, SubJobDetails
from common.const import DBLogLevel, SubJobStatusEnum, BackupTypeEnum
from common.file_common import exec_lchown
from common.util.exec_utils import exec_overwrite_file
from common.util.common_utils import get_group_name_by_os_user
from dws.backup.cluster_backup import ClusterBackup
from dws.commons.const import IntrusiveMode, BackupResType, DwsBackupLabel, DwsRoachPort
from dws.commons.function_tool import log_start, progress_notify
from dws.commons.dws_exception import ErrCodeException
from dws.commons.error_code import DwsErrorCode
from dws.commons.job_info import JobInfo
from dws.database.db_models import DwsIndex
from dws.resource.query_interface import QueryRes
from dws.commons.dws_param_parse import CopyInfoParam


class SchemaBackupErrCode(int, Enum):
    """Result codes of the schema-existence check performed before backup."""

    # Every requested schema exists in the database.
    NO_ERROR = 0
    # None of the requested schemas exist.
    ALL_SCHEMA_NOT_EXIST = 1
    # Some (but not all) of the requested schemas are missing.
    SOME_SCHEMA_MISSING = 2


class SchemaBackup(ClusterBackup):
    """Schema-level backup job for a DWS cluster.

    Extends ClusterBackup with schema granularity: the GaussRoach backup
    command is restricted to a schema list file, schemas that no longer
    exist in the database are filtered out (with a warning reported for
    them), and the copy index records cluster/database/schema/table rows.
    """

    def __init__(self, pid):
        super().__init__(pid)
        # Temporary file holding the schema names handed to GaussRoach via
        # --schema-list; filled in by construct_cmd().
        self._list_path = ""

    @log_start()
    def construct_cmd(self, job_info: JobInfo, intrusive_mode, port):
        """Build the GaussRoach command line for a fine-grained schema backup.

        :param job_info: parsed job parameters
        :param intrusive_mode: IntrusiveMode value; NON_INTRUSIVE_MODE adds
            the remote-NBU options and the client port
        :param port: client port appended in non-intrusive mode
        :return: complete backup command string
        :raises Exception: on schema list file failure, no available master
            port, or a missing prior backup key for a non-full backup
        """
        self._list_path = os.path.join("/home", job_info.usr, "schema_list_temp.txt")
        if not check_path_valid(self._list_path, "/home/"):
            log.error(f"Schema list path [{self._list_path}] invalid, {job_info.log_format()}.")
            raise Exception("Schema list path invalid")
        ret, db_name = self._create_schema_list_file(self._list_path, job_info)
        if not ret:
            log.error(f"Create schema list file failed, {job_info.log_format()}.")
            raise Exception("Create schema list file failed")
        master_port = select_available_port(DwsRoachPort.ROACH_PORT_START, DwsRoachPort.ROACH_PORT_END)
        if not master_port:
            log.error(f"Master port {master_port} can not use.")
            raise Exception(f"Master port {master_port} can not use")
        roach_param = construct_roach_param()
        backup_cmd = f"python3 $GPHOME/script/GaussRoach.py -t backup --master-port {master_port} " \
                     f"--media-type NBU --media-destination nbu_policy {roach_param} --physical-fine-grained " \
                     f"--metadata-destination {job_info.metadata_destination} " \
                     f"--dbname {db_name} --schema-list {self._list_path} "

        if intrusive_mode == IntrusiveMode.NON_INTRUSIVE_MODE:
            backup_cmd += f" --nbu-on-remote --nbu-media-list /home/{job_info.usr}/media_list_file.txt " \
                          f"--client-port {port}"
        if job_info.backup_type != BackupTypeEnum.FULL_BACKUP:
            # Incremental/differential backups must reference the key of the
            # previous copy via --prior-backup-key.
            try:
                backup_key = CopyInfoParam.get_backup_key(self.get_last_copy_info(job_info))
            except Exception as e:
                log.error(f"Get last backup key err, err: {e}")
                raise e
            if backup_key:
                log.info(f"Backup_key: {backup_key}")
                backup_cmd += f' --prior-backup-key {backup_key}'
            else:
                log.error("Last backup key err.")
                raise Exception("Last backup key err.")
        log.info(f"schema backup cmd:{backup_cmd}")
        return backup_cmd

    @log_start()
    def check_schema_state(self, check_schema: set, total_schema: set):
        """Split the schemas to back up into existing and missing ones.

        Schemas that have been dropped since the job was issued are filtered
        out here so the backup only covers what still exists.

        :param check_schema: schemas requested for backup (job parameters)
        :param total_schema: schemas currently present in the database
        :return: tuple of (SchemaBackupErrCode, existing schemas, missing
            schemas); the last two elements are lists
        """
        log.info(f"{check_schema}, {total_schema}")
        # Hoist the set difference; the original recomputed it three times.
        missing = check_schema - total_schema
        if not missing:
            # Fix: return a list here as in the other branches; the original
            # leaked the raw set, giving callers inconsistent return types.
            return SchemaBackupErrCode.NO_ERROR, list(check_schema), []
        if missing == check_schema:
            return SchemaBackupErrCode.ALL_SCHEMA_NOT_EXIST, [], list(check_schema)
        return SchemaBackupErrCode.SOME_SCHEMA_MISSING, \
            list(check_schema & total_schema), list(missing)

    @log_start()
    @progress_notify(DwsBackupLabel.BACKUP_SUBJOB_SUC_LABEL, DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL)
    def sub_job_backup(self, job_info: JobInfo, intrusive_mode):
        """Run the backup sub job after validating the schema list.

        Missing schemas are reported as a WARN job detail and removed from
        the backup scope; the remaining work is delegated to the parent.

        :return: False when the schema info cannot be parsed, otherwise the
            result of ClusterBackup.sub_job_backup
        :raises ErrCodeException: when none of the requested schemas exist
        """
        log.info("Start to do schema backup.")
        ret, db_name, schemas = self._get_schema_info(job_info)
        if not ret:
            log.error(f"Get schema info failed, {job_info.log_format()}.")
            return False

        query_schemas = QueryRes(job_info.usr, job_info.env_path).get_all_schemas(db_name)
        ret, exist_schema, lack_schema = self.check_schema_state(set(schemas), set(query_schemas))
        if ret == SchemaBackupErrCode.ALL_SCHEMA_NOT_EXIST:
            log.error(f'No schema res in {db_name}')
            raise ErrCodeException(log_detail=LogDetail(logDetail=DwsErrorCode.ALL_SCHEMA_NOT_EXIST.value,
                                                        logTimestamp=int(time.time()),
                                                        logLevel=DBLogLevel.ERROR))

        if ret == SchemaBackupErrCode.SOME_SCHEMA_MISSING:
            # Fix: interpolate the actual database name; the original logged
            # the literal text "database_name".
            log.warn(f'Lack of res [{lack_schema}] in {db_name}')
            self.report_not_exist_schema(job_info, lack_schema)
            # Shrink the backup scope to the schemas that still exist.
            job_info.pro_obj["extendInfo"]["table"] = ','.join([db_name + "/" + item for item in exist_schema])

        return super().sub_job_backup(job_info, intrusive_mode)

    @log_start()
    def report_not_exist_schema(self, job_info: JobInfo, schema_list: list):
        """Report a WARN job detail listing the schemas that no longer exist.

        :param job_info: parsed job parameters
        :param schema_list: names of the missing schemas
        """
        log_detail = [
            LogDetail(
                logInfo=DwsBackupLabel.DWS_BACKUP_SUBJOB_ROACH_SCHEMA_LABEL,
                logInfoParam=[",".join(schema_list)],
                logLevel=DBLogLevel.WARN)
        ]
        self.report_job_details(
            job_info,
            SubJobDetails(
                taskId=job_info.job_id,
                subTaskId=job_info.sub_job_id,
                taskStatus=SubJobStatusEnum.RUNNING,
                progress=10,
                logDetail=log_detail).dict(by_alias=True))

    @log_start()
    def save_cluster_info_with_query(self, job_info: JobInfo):
        """Insert the cluster index record, then the database/schema records.

        :raises Exception: when the cluster name is missing from the protect
            environment parameters
        """
        log.info("Schema backup save cluster info with query.")
        cluster_uuid = str(uuid.uuid1())
        cluster_name = job_info.protect_env.get('name')
        if not cluster_name:
            raise Exception('Param of cluster_name err')

        self._db.insert_record(DwsIndex(UUID=cluster_uuid,
                                        NAME=cluster_name,
                                        TYPE=BackupResType.CLUSTER.value,
                                        PARENT_PATH='/',
                                        PARENT_UUID=''))

        self.save_database_info_with_query(job_info, cluster_uuid, '/' + cluster_name)

    @log_start()
    def save_database_info_with_query(self, job_info: JobInfo, parent_uuid: str, parent_path: str):
        """Insert the database index record under the cluster node.

        :param parent_uuid: UUID of the parent (cluster) index record
        :param parent_path: index path of the parent node
        :raises Exception: when 'parentName' (the database name) is missing
        """
        log.info(f"Cluster, {job_info.usr}, {job_info.env_path}, {parent_uuid}, {parent_path}")

        database_uuid = str(uuid.uuid1())
        db_name = job_info.pro_obj.get('parentName')
        if not db_name:
            raise Exception('Param of db_name err')

        self._db.insert_record(DwsIndex(UUID=database_uuid,
                                        NAME=db_name,
                                        TYPE=BackupResType.DATABASE.value,
                                        PARENT_PATH=parent_path,
                                        PARENT_UUID=parent_uuid))
        self.save_schema_info_with_query(job_info, db_name, database_uuid, f'{parent_path}/{db_name}')

    @log_start()
    def save_schema_info_with_query(self, job_info: JobInfo, database_name: str, parent_uuid: str, parent_path: str):
        """Insert one index record per backed-up schema, then its tables.

        The schema list comes from extendInfo["table"] entries shaped like
        "<db>/<schema>"; only the schema part after the last '/' is kept.
        """
        log.info(
            f"Cluster, {job_info.usr}, {job_info.env_path}, {parent_uuid}, {parent_path}")

        for schema in job_info.pro_obj.get("extendInfo", {}).get("table", "").split(','):
            schema_uuid = str(uuid.uuid1())
            schema_name = schema.split('/').pop()
            self._db.insert_record(DwsIndex(UUID=schema_uuid,
                                            NAME=schema_name,
                                            TYPE=BackupResType.SCHEMA.value,
                                            PARENT_PATH=parent_path,
                                            PARENT_UUID=parent_uuid))
            self.save_table_info_with_query(job_info,
                                            [database_name, schema_name, schema_uuid, f'{parent_path}/{schema_name}'])

    @log_start()
    def _create_schema_list_file(self, path, job_info: JobInfo):
        """Write the schema names into the list file consumed by GaussRoach.

        Schema names containing uppercase letters are double-quoted so the
        database treats them case-sensitively. The file is chown-ed to the
        job user and copied into that user's sandbox.

        :param path: target path of the schema list file
        :param job_info: parsed job parameters
        :return: (success flag, database name); the name is "" on failure
        """
        log.info(f"Enter, {job_info.log_format()}.")
        schema_str = job_info.pro_obj.get("extendInfo", {}).get("table")
        if not schema_str:
            log.error(f"Param of schema err. {job_info.log_format()}")
            return False, ""

        log.info("Start to deal schema name.")
        schema_list = list(map(lambda x: x.split('/').pop(), schema_str.split(",")))
        schema_deal_list = list()
        for sch_str in schema_list:
            if has_uppercase(sch_str):
                # Quote mixed-case names to preserve case sensitivity.
                schema_deal_list.append(r'"' + sch_str + r'"')
            else:
                schema_deal_list.append(sch_str)
        schema_list_str = '\n'.join(schema_deal_list)
        log.info("Deal schema name success.")

        self._write_schema_to_file(path, schema_list_str)
        path = os.path.realpath(path)
        if not exec_lchown(path, job_info.usr, get_group_name_by_os_user(job_info.usr)):
            log.error(f"Change schema file [{path}] owner failed, {job_info.log_format()}.")
            return False, ""
        if not copy_file_into_sandbox(job_info.usr, path, os.path.join('/home', job_info.usr)):
            log.error("Copy schema file into sandbox failed.")
            return False, ""
        return True, job_info.pro_obj.get('parentName')

    def _get_schema_info(self, job_info: JobInfo):
        """Parse the schema list from the job parameters.

        Each entry in extendInfo["table"] must look like "<db>/<schema>" and
        all entries must reference the same database.

        :param job_info: parsed job parameters
        :return: (success flag, database name, list of schema names); on any
            parse error returns (False, "", [])
        """
        log.info(f"Enter, {job_info.log_format()}.")

        schemas_str = job_info.pro_obj_extend_info.get("table")
        if not schemas_str:
            log.error(f"Get schema string from extend info failed, {job_info.log_format()}.")
            return False, "", []
        ret_list = []
        schemas_list = filter(lambda x: x.strip(), schemas_str.split(","))
        name_set = set()
        for schema in schemas_list:
            tmp_list = schema.split('/')
            if len(tmp_list) != 2:
                log.error(f"Split err, count: {len(tmp_list)}, {job_info.log_format()}.")
                return False, "", []
            name_set.add(tmp_list[0].strip())
            ret_list.append(tmp_list[1].strip())
        # All entries must name exactly one database and at least one schema.
        if len(name_set) != 1 or len(ret_list) == 0:
            log.error(f"Result err, name_set cnt: {len(name_set)}, " \
                      f"ret_list cnt:{len(ret_list)}, {job_info.log_format()}.")
            return False, "", []
        return True, name_set.pop(), ret_list

    def _write_schema_to_file(self, file_path, schema_list):
        # Overwrite the list file with plain text (one schema name per line).
        exec_overwrite_file(file_path, schema_list, json_flag=False)
