#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import locale
import os
import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION, ALL_COMPLETED
from threading import Lock

from common.const import SubJobStatusEnum, EnvInfoPrefix, AuthType, ReportDBLabel, BackupTypeEnum
from common.err_code import CommErrCode
from sqlserver import log
from sqlserver.commons.common import output_action_result, get_env_variable, get_channel_number, \
    is_need_adjust_channel, minimum_vdi_virtual_device
from sqlserver.commons.const import SQLServerCode, ExecCmdResult, \
    SQLServerProgressFileType, Progress, VDIActionType, SQLTmpFileName, ParamConstant, \
    BodyErr, SubJobExecResult, SQLServerStrConstant
from sqlserver.commons.models import ExtendInfo
from sqlserver.commons.sqlserver_utils import get_offset_to_utc, SqlServerUtils
from sqlserver.sqlserver_base import SQLServerBase


class SQLServerBackupInstance(SQLServerBase):
    """Instance-level SQL Server backup job built on top of SQLServerBase."""

    def __init__(self, p_id, job_id, sub_job_id, json_param):
        """Initialize per-job state; most fields are filled later by prepare_* methods."""
        super().__init__(p_id, job_id, sub_job_id, json_param)
        self._job_id = job_id
        # Databases selected for backup and their count (set by prepare_backup_databases).
        self.database_list = []
        self.database_num = 0
        # Credential/connection info (set by prepare_user_infos).
        self.instance_name = ""
        self.auth_mode = str(AuthType.NO_AUTO.value)
        self.user_name = ""
        self.pass_prefix = ""
        self.channel_number = SQLServerStrConstant.DEFAULT_CHANNEL

    def prepare_user_infos(self):
        """Load channel number, instance name and auth info from job params / env variables."""
        job_param = self._json_param.get("job", {})
        self.channel_number = get_channel_number(self._json_param)
        self.instance_name = job_param.get("protectObject", {}).get("name", "")
        self.auth_mode = get_env_variable(f"{EnvInfoPrefix.OBJ_AUTH_TYPE}_{self._p_id}")
        self.user_name = get_env_variable(f"{EnvInfoPrefix.OBJ_USERNAME_PREFIX}_{self._p_id}")
        self.pass_prefix = f"{EnvInfoPrefix.OBJ_PASS_PREFIX}_{self._p_id}"
        self.check_and_prepare_user_info(self.instance_name)

    def exec_check_allow_backup_in_local(self):
        """Check whether this node may run the backup.

        Verifies the SQL Server service is running for the protected instance
        and that the configured user has sufficient permissions; reports an
        action result on failure.
        :return: True when backup is allowed locally, otherwise False.
        """
        self.prepare_user_infos()
        if not self.sql_utils.check_service_is_running(self.instance_name):
            nodes = self._json_param.get("job", {}).get("protectEnv", {}).get("nodes", [{}])
            node_ip = nodes[0].get("endpoint", "")
            output_action_result(self._p_id, SQLServerCode.FAILED.value,
                                 BodyErr.INSTANCE_STATE_ABNORMAL.value, "", [node_ip, self.instance_name])
            return False
        if not self.sql_utils.check_user_permission(self.auth_mode, self.user_name):
            log.error(f"Check user permission failed. pid: {self._p_id} jobId: {self._job_id}")
            output_action_result(self._p_id, SQLServerCode.FAILED.value, BodyErr.SQLSERVER_PERMISSIONS_ERROR.value, "")
            return False
        log.info(f"Succeed to check allow backup in local. pid: {self._p_id} jobId: {self._job_id}")
        return True

    def check_allow_backup_in_local(self):
        """Exception-safe wrapper around exec_check_allow_backup_in_local."""
        log.info(f"Start to exec check allow backup in local, pid: {self._p_id} jobId: {self._job_id}")
        try:
            return self.exec_check_allow_backup_in_local()
        except Exception as err:
            log.error(f"Exception when check allow in local, err: {err}. pid: {self._p_id} jobId: {self._job_id}")
            output_action_result(self._p_id, SQLServerCode.FAILED.value, 0, "")
            return False

    def prepare_backup_databases(self):
        """Build the database list to back up, ordered ascending by size.

        Excludes: tempdb (never backed up), availability-group members,
        reporting-service databases and — for log backups — master.
        """
        self.database_list = self.sql_utils.get_instance_database_size_asc()
        always_list = self.sql_utils.get_ag_database_list()
        # The system database tempdb does not support backup; drop it.
        if "tempdb" in self.database_list:
            self.database_list.remove("tempdb")
        report_list = SqlServerUtils.get_report_service_database_info(self.instance_name)
        # Databases in availability groups and reporting-service databases are
        # not backed up at instance level.
        for name in list(always_list) + list(report_list):
            if name in self.database_list:
                self.database_list.remove(name)
        # The system database master does not support log backup; drop it.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value and "master" in self.database_list:
            self.database_list.remove("master")

    def check_job_type(self):
        """Decide whether a differential backup must be converted to a full backup.

        Checks under DATA_PATH whether the databases require the
        incremental-to-full conversion.
        :return: tuple of (code, body_error_code, message)
        """
        log.info("Enter check_backup_job_type_inc")
        self.prepare_user_infos()
        self.prepare_backup_databases()
        if self.check_dir_is_exist(self.database_list):
            result = SQLServerCode.SUCCESS.value, SQLServerCode.SUCCESS.value, ""
        else:
            result = SQLServerCode.FAILED.value, BodyErr.ERR_INC_TO_FULL.value, ""
        log.info("Leave check_backup_job_type_inc")
        return result

    def check_backup_job_type(self):
        """Check the backup job type; full backups pass through unchanged.

        For non-full backups, delegates to check_job_type() to decide whether
        an incremental/differential backup must be converted to a full one.
        :return: tuple of (code, body_error_code, message)
        """
        log.info(f"Start to check backup type, pid: {self._p_id} jobId: {self._job_id}")
        if self.backup_type == BackupTypeEnum.FULL_BACKUP.value:
            log.info(f"Support full backup. pid: {self._p_id} jobId: {self._job_id}")
            return SQLServerCode.SUCCESS.value, SQLServerCode.SUCCESS.value, ""
        try:
            code, body_error_code, msg = self.check_job_type()
        except Exception as err:
            log.error(f"Exception when exec check backup type, pid: {self._p_id} jobId: {self._job_id}, err:{err}")
            return SQLServerCode.FAILED.value, SQLServerCode.SUCCESS.value, ""
        # Bug fix: the original f-string was missing braces around msg and
        # logged the literal text "msg: msg" instead of the message value.
        log.info(f"Check job type code: {code}, body_error_code: {body_error_code}, msg: {msg}, pid: {self._p_id}"
                 f" jobId: {self._job_id}")
        return code, body_error_code, msg

    def pre_job(self):
        """Run pre-backup checks and record COMPLETED progress on success.

        Log backups additionally require that no database uses the simple
        recovery model and that the previous log backup succeeded.  Raises
        Exception (with the error log tail populated) when any check fails.
        """
        self.prepare_user_infos()
        self.prepare_backup_databases()
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            # Log backup: verify the recovery model of every database.
            simple_list = self.sql_utils.get_simple_sql_list()
            if simple_list:
                self.error_logtail.log_detail = BodyErr.SQLSERVER_REVERTIVE_MODE_ERROR.value
                self.error_logtail.log_detail_param = [self.sql_utils.list_to_str(simple_list)]
                log.error(f"Databases: {simple_list} in simple mode")
                raise Exception('Databases with simple recovery mode')
            # Verify the state of the previous log backup.
            self.check_last_backup()
        # Verify the state of each database to back up.
        abnormal_databases = self.sql_utils.get_abnormal_database_list()
        abnormal_list = [db for db in self.database_list if db in abnormal_databases]
        if abnormal_list:
            self.error_logtail.log_detail = BodyErr.BACKUP_FAIL_FOR_DATABASE_STATE_ABNORMAL.value
            self.error_logtail.log_detail_param = [self.sql_utils.list_to_str(abnormal_list)]
            log.error(f"Databases: {abnormal_list} in abnormal state")
            raise Exception('Databases with abnormal state')
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value, Progress.PROGRESS_ONE_HUNDRED,
                                    "", SQLServerProgressFileType.COMMON)
        return True

    def exec_pre_job(self):
        """Run pre_job, reporting FAILED progress when it raises."""
        log.info(f"Start to do pre job, pid: {self._p_id} jobId: {self._job_id}")
        try:
            result = self.pre_job()
        except Exception as err:
            log.error(f"Exception when pre job. pid: {self._p_id} jobId: {self._job_id}, err: {str(err)}")
            self.error_logtail.log_info = ReportDBLabel.PRE_REQUISIT_FAILED
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
            return False
        log.info(f"Pre job result is ret: {result}. pid: {self._p_id} jobId: {self._job_id}")
        return result

    def backup_database_thread(self, database, lock, copy_extend_info):
        """Back up one database through the VDI tool and record its meta info.

        Runs both serially (system databases) and inside the backup thread
        pool; ``lock`` serializes the updates to the shared
        ``copy_extend_info`` and the progress counters.  Raises Exception when
        the VDI command fails so the pool's FIRST_EXCEPTION wait can stop
        scheduling further work.
        """
        log.info(f"Start to backup database: {database}")
        copy_path = os.path.join(self._data_path, f"{database}.bak")
        # master always takes a full backup
        # the backup API requires a minimum of 2 virtual devices
        virtual_device_count = minimum_vdi_virtual_device(self.channel_number)
        if database == "master":
            cmd = f"{ParamConstant.VDI_TOOL_PATH} {VDIActionType.FULL_BACKUP.value} \"{database}\" " \
                  f"\"{copy_path}\" \"{self.vdi_info}\" \"\" \"\" {virtual_device_count}"
        else:
            cmd = (f"{ParamConstant.VDI_TOOL_PATH} {self.vdi_type} \"{database}\" \"{copy_path}\" \"{self.vdi_info}\""
                   f" \"\" \"\" {virtual_device_count}")
        log.info(f"cmd: {cmd}")
        code, std_out, _ = self.sql_utils.get_command_result_with_encode(cmd, 'UTF-8')
        if code != ExecCmdResult.SUCCESS:
            log.error(f"Failed to backup database: {database}, out: {std_out}")
            # Map the tool output to a specific error label where possible.
            if "there is no current database backup" in str(std_out):
                self.error_logtail.log_detail = BodyErr.BACKUP_FAIL_FOR_FULL_BACKUP_NEED.value
            elif is_need_adjust_channel(std_out):
                self.error_logtail.log_detail = BodyErr.ADJUST_CHANNELS.value
            else:
                self.error_logtail.log_detail = CommErrCode.FAILED_EXECUTE_COMMAND
                self.error_logtail.log_detail_param = ["BACKUP", std_out]
            raise Exception(f"backup {database} failed")
        # Update shared meta info and progress under the lock.
        with lock:
            database_meta_info = self.sql_utils.get_latest_backup_meta_info(database, self.vdi_type)
            copy_extend_info.meta_info.update({
                f"{database}": database_meta_info
            })
            SubJobExecResult.SUCCESS_NUM += 1
            Progress.PROGRESS += Progress.INTERVAL_PROGRESS
            self.running_tail.log_info_param = [
                self._sub_job_id, f"{self.database_num - SubJobExecResult.SUCCESS_NUM}",
                f"{SubJobExecResult.SUCCESS_NUM}"
            ]
            self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, Progress.PROGRESS, self.running_tail,
                                        SQLServerProgressFileType.COMMON)
        return True, database

    def backup_instance(self):
        """Back up every database of the instance.

        System databases are backed up serially first, then the remaining
        databases in parallel, bounded by the configured channel number.  The
        collected copy meta info is written to the copy-info temp file.
        :return: True on success, False when any database backup failed.
        """
        log.info(f"Start to backup log instance. pid: {self._p_id} jobId: {self._job_id}")
        copy_extend_info = ExtendInfo()
        try:
            Progress.INTERVAL_PROGRESS = int(Progress.MAX_PROGRESS / self.database_num)
        except Exception as err:
            log.error(f'Exception when calculate the INTERVAL_PROGRESS as err: {str(err)}')
            return False
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            sys_databases = ("model", "msdb")
        else:
            copy_extend_info.begin_time = int(time.time())
            sys_databases = ("master", "model", "msdb")
        # Back up the system databases first, serially.
        lock = Lock()
        for database in sys_databases:
            self.backup_database_thread(database, lock, copy_extend_info)
        with ThreadPoolExecutor(max_workers=self.channel_number, thread_name_prefix="backup_thread") as executor:
            task_dict = {}
            for database in self.database_list:
                if database in sys_databases:
                    continue
                task = executor.submit(self.backup_database_thread, database, lock, copy_extend_info)
                task_dict[task] = database
            task_list = list(task_dict)
            # Stop scheduling new work as soon as any task raises.
            wait(task_list, return_when=FIRST_EXCEPTION)
            for task in reversed(task_list):
                task.cancel()
            # Bug fix: wait for still-running tasks BEFORE inspecting failures.
            # The original code matched "finished raised Exception" in str(task)
            # before the ALL_COMPLETED wait, which (a) only detected exceptions
            # whose class was exactly `Exception` and (b) missed tasks that were
            # still running and failed afterwards.  Future.exception() is the
            # reliable way to detect a failed task.
            wait(task_list, return_when=ALL_COMPLETED)
            fail_database_list = []
            for task in task_list:
                if task.cancelled():
                    # Cancelled tasks never ran; as before, they are not
                    # reported as failed databases.
                    continue
                if task.exception() is not None:
                    fail_database_list.append(task_dict[task])
            if fail_database_list:
                log.error(f"databases:{fail_database_list} backup failed")
                return False
        copy_extend_info.end_time = int(time.time())
        copy_extend_info.backup_time = copy_extend_info.end_time
        copy_extend_info.offset_to_utc = get_offset_to_utc()
        copy_extend_info.version = self.sql_utils.get_version()
        # For log backups, derive the copy's begin time from the previous copy.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            copy_extend_info.begin_time = self.get_data_start_time(copy_extend_info)
        self.write_tmp_file(copy_extend_info.json(by_alias=True), SQLTmpFileName.COPY_INFO_FILE)
        # Persist the sqlite info for the copy.
        self.save_sqlite_info(self.database_list)
        log.info(f"End to backup log, pid: {self._p_id} jobId: {self._job_id}")
        return True

    def exec_backup_job(self):
        """Prepare parameters, record initial progress and launch the instance backup."""
        self.prepare_user_infos()
        # Collect every database of the current instance.
        self.prepare_backup_databases()
        if not self.database_list:
            log.error(f"Failed to get databases. pid: {self._p_id} jobId: {self._job_id}")
            return False
        self.database_num = len(self.database_list)
        # Record initial progress before the backup starts.
        self.running_tail.log_info_param = [self._sub_job_id, str(self.database_num), "0"]
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, Progress.PROGRESS, self.running_tail,
                                    SQLServerProgressFileType.COMMON)
        if not self.prepare_path_param():
            log.error("Failed to get path info")
            return False
        self.convert_vdi_type()
        # Snapshot database file info before backing up.
        self.save_meta_info(self.database_list, "meta.info", True)
        return self.backup_instance()

    def exec_sub_job(self):
        """Execute the instance backup sub job and report the final progress.

        :return: True when the backup succeeded, otherwise False.
        """
        log.info(f"Start to backup. pid: {self._p_id} jobId: {self._job_id}")
        try:
            result = self.exec_backup_job()
        except Exception as err:
            log.exception(f"Exception when exec backup, err: {err}. pid: {self._p_id} jobId:{self._job_id}")
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value,
                                        Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
            return False
        self.update_status_file(result)
        if not result:
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value,
                                        Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
            log.error(f"Failed to backup. pid: {self._p_id} jobId:{self._job_id}")
            return False
        self.success_logtail.log_info_param = [self._sub_job_id, str(self.database_num)]
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value,
                                    Progress.PROGRESS_ONE_HUNDRED,
                                    self.success_logtail, SQLServerProgressFileType.COMMON)
        log.info(f"Succeed to backup. pid: {self._p_id} jobId:{self._job_id}")
        return True

    def query_copy_info(self):
        """Query copy info for the current backup type, reporting failure when empty."""
        try:
            copy_result = self.query_copy_infos(self.backup_type)
        except Exception as err:
            log.exception(f"Exception when query copy info, err: {err}")
            return False
        if copy_result:
            return copy_result
        output_action_result(self._p_id, SQLServerCode.FAILED.value, 0, "")
        return copy_result
