#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
from threading import Lock, Thread

from common.const import BackupTypeEnum, SubJobStatusEnum, ReportDBLabel
from sqlserver import log
from sqlserver.commons.common import get_env_variable, output_action_result, get_channel_number, \
    minimum_vdi_virtual_device
from sqlserver.commons.const import Progress, SQLServerProgressFileType, \
    SQLServerCode, SQLTmpFileName, ParamConstant, ExecCmdResult, BodyErr, SubJobExecResult, SQLServerStrConstant
from sqlserver.commons.models import ExtendInfo
from sqlserver.commons.sqlserver_utils import get_offset_to_utc
from sqlserver.sqlserver_base import SQLServerBase


class SQLServerBackupAlwaysOn(SQLServerBase):
    """Backup executor for a SQL Server Always On availability group.

    The agent runs this on every cluster node; only the node that hosts the
    primary replica is allowed to actually back up (see
    check_allow_backup_in_local). Each database in the group is backed up by
    its own worker thread via the VDI tool; progress is reported to a shared
    progress file as databases complete.
    """

    def __init__(self, p_id, job_id, sub_job_id, json_param):
        super().__init__(p_id, job_id, sub_job_id, json_param)
        self._job_id = job_id
        self.report_progress_thread_start = True
        self.group_name = ""
        self.database_list = list()
        self.database_num = 0
        self.fail_list = list()  # databases whose backup failed
        self.node_ip = ""
        self.instance_name = ""
        self.auth_mode = ""
        self.user_name = ""
        self.pass_prefix = ""
        self.channel_number = SQLServerStrConstant.DEFAULT_CHANNEL
        # Serializes updates of the progress counters / extend_info that are
        # shared by the per-database worker threads (see backup_thread):
        # "+=" on a class attribute is a non-atomic read-modify-write.
        self._counter_lock = Lock()

    def prepare_user_infos(self):
        """Resolve group name, channel number and local-node auth info from the job params.

        Locates the node whose name matches the local hostname and pulls its
        endpoint and credentials from the environment variables referenced by
        protectEnv.

        :raise Exception: when the group name or the node list is missing.
        """
        self.group_name = self._json_param.get("job", {}).get("protectObject", {}).get("name", None)
        self.channel_number = get_channel_number(self._json_param)
        if not self.group_name:
            log.error("Failed to get group name")
            raise Exception("Failed to get group name")
        self.instance_name = self._json_param.get("job", {}).get("protectObject", {}).get("extendInfo", {}) \
            .get("instanceName", "")
        localhost = self.get_hostname()
        # Availability-group credentials are taken from protectEnv.
        nodes = self._json_param.get("job", {}).get("protectEnv", {}).get("nodes", [])
        if not nodes:
            log.error("Failed to get nodes info")
            raise Exception("Failed to get nodes info")
        for idx, node in enumerate(nodes):
            if node.get("name") == localhost:
                self.node_ip = node.get("endpoint", "")
                self.auth_mode = get_env_variable(f"job_protectEnv_nodes_{idx}_auth_authType_{self._p_id}")
                self.user_name = get_env_variable(f'job_protectEnv_nodes_{idx}_auth_authKey_{self._p_id}')
                self.pass_prefix = f'job_protectEnv_nodes_{idx}_auth_authPwd_{self._p_id}'
                self.check_and_prepare_user_info(self.instance_name)
                break

    def do_check_allow_backup(self):
        """Return True when the local instance is running and hosts the primary replica.

        :raise Exception: when the SQL Server instance service is not running.
        """
        self.prepare_user_infos()
        if not self.sql_utils.check_service_is_running(self.instance_name):
            log.error(f"The instance: {self.instance_name} is not running")
            raise Exception("The instance is not running")
        return self.sql_utils.check_is_primary_replica(self.group_name)

    def check_allow_backup_in_local(self):
        """
        Check whether this host may run the backup task: it must host the primary replica.

        Writes an action result (with a body error code) for the agent on any
        failure path.

        :return: True when the local node hosts the primary replica, False otherwise.
        """
        log.info(f"Start to check allow backup in local. pid: {self._p_id} jobId: {self._job_id}")
        try:
            result = self.do_check_allow_backup()
        except Exception as err:
            # Distinguish "instance down" from other internal errors so the UI
            # can show a targeted label with node/instance parameters.
            if "The instance is not running" in str(err):
                output_action_result(self._p_id, SQLServerCode.FAILED.value,
                                     BodyErr.INSTANCE_STATE_ABNORMAL.value, "", [self.node_ip, self.instance_name])
            else:
                output_action_result(self._p_id, SQLServerCode.FAILED.value, BodyErr.ERROR_INTERNAL.value, "")
            log.exception(f"Exception when check primary replica, err: {err}")
            return False
        if result:
            log.info(f"Primary replica and allow backup. pid: {self._p_id} jobId: {self._job_id}")
            return True
        log.info(f"Secondary replica not support backup. pid: {self._p_id} jobId: {self._job_id}")
        output_action_result(self._p_id, SQLServerCode.FAILED.value, BodyErr.ERROR_INTERNAL.value, "")
        return False

    def check_diff_job_type(self):
        """Verify a differential/log backup has a usable latest full copy to base on.

        Checks that the newest "full*" directory in the data repository holds a
        .bak file for every database in the group and that it matches the
        latest full backup recorded in the database backupset.

        :return: tuple (code, body_err, message); body_err is
                 BodyErr.ERR_INC_TO_FULL when the job must be converted to full.
        """
        self.get_data_path()
        if not self._data_path:
            log.error(f"Failed to get data path. pid: {self._p_id} jobId: {self._job_id}")
            return SQLServerCode.FAILED.value, 0, ""
        self.prepare_user_infos()
        full_copy_dirs = [os.path.join(self._data_path, path)
                          for path in os.listdir(self._data_path) if path.startswith("full")]
        if not full_copy_dirs:
            log.error(f"Full copy is empty. pid: {self._p_id} jobId: {self._job_id}")
            return SQLServerCode.FAILED.value, BodyErr.ERR_INC_TO_FULL.value, ""
        # The most recently modified "full*" directory holds the latest full copy.
        latest_full_dir = max(full_copy_dirs, key=os.path.getmtime)
        full_copy_file = os.listdir(latest_full_dir)
        database_list = self.sql_utils.get_always_on_database(self.group_name)
        for database in database_list:
            if f"{database}.bak" not in full_copy_file:
                log.error(f"Database: {database} copy not exist. pid: {self._p_id} jobId: {self._job_id}")
                return SQLServerCode.FAILED.value, BodyErr.ERR_INC_TO_FULL.value, ""
        result = self.check_full_copy(database_list)
        if not result:
            log.error(f"full backup copy is not the same as the latest full backup copy on the database backupset.  \
                    pid: {self._p_id} jobId: {self._job_id}")
            return SQLServerCode.FAILED.value, BodyErr.ERR_INC_TO_FULL.value, ""
        log.info(f"Full copy exist. pid: {self._p_id} jobId: {self._job_id}")
        return SQLServerCode.SUCCESS.value, 0, ""

    def check_backup_job_type(self):
        """Validate the requested backup type; non-full types require a prior full copy.

        :return: tuple (code, body_err, message) as produced by check_diff_job_type.
        """
        log.info(f"Start to exec check backup type, pid: {self._p_id} jobId: {self._job_id}")
        if self.backup_type == BackupTypeEnum.FULL_BACKUP.value:
            log.info(f"Support full backup. pid: {self._p_id} jobId: {self._job_id}")
            return SQLServerCode.SUCCESS.value, 0, ""
        try:
            code, body_err, message = self.check_diff_job_type()
        except Exception as err:
            log.error(f"Exception when check diff backup job as err: {err} jobId: {self._job_id}")
            return SQLServerCode.FAILED.value, 0, ""
        return code, body_err, message

    def pre_job(self):
        """Run pre-backup checks: user permission, database state, previous log backup.

        Records progress to the common progress file before and after the checks.

        :return: True on success.
        :raise Exception: on any failed check; self.error_logtail is populated
                          with the matching body-error detail before raising.
        """
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, Progress.PROGRESS,
                                    "", SQLServerProgressFileType.COMMON)
        self.prepare_user_infos()
        # Check user permission.
        if not self.sql_utils.check_user_permission(self.auth_mode, self.user_name):
            self.error_logtail.log_detail = BodyErr.SQLSERVER_PERMISSIONS_ERROR.value
            log.error("Check user permission failed")
            raise Exception("Check user permission failed")
        # Check database state: the group must contain at least one database
        # and none of them may be in an abnormal state.
        database_list = self.sql_utils.get_always_on_database(self.group_name)
        if not database_list:
            self.error_logtail.log_detail = BodyErr.ALWAYS_ON_GROUP_NO_DATABASE.value
            log.error("always on group no database")
            raise Exception("always on group no database")
        abnormal_databases = self.sql_utils.get_abnormal_database_list()
        abnormal_list = [database for database in database_list if database in abnormal_databases]
        if abnormal_list:
            self.error_logtail.log_detail = BodyErr.BACKUP_FAIL_FOR_DATABASE_STATE_ABNORMAL.value
            self.error_logtail.log_detail_param = [self.sql_utils.list_to_str(abnormal_list)]
            log.error(f"Databases: {abnormal_list} in abnormal state")
            raise Exception('Databases with abnormal state')
        # For log backup, verify the state of the previous log backup.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            self.check_last_backup()
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value, Progress.PROGRESS_ONE_HUNDRED,
                                    "", SQLServerProgressFileType.COMMON)
        return True

    def exec_pre_job(self):
        """Entry point for the pre-job step; converts pre_job exceptions into a failed progress record.

        :return: True when pre-checks pass, False otherwise.
        """
        log.info(f"Start to do pre job, pid: {self._p_id} jobId: {self._job_id}")
        try:
            ret = self.pre_job()
        except Exception as err:
            self.error_logtail.log_info = ReportDBLabel.PRE_REQUISIT_FAILED
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
            log.error(f"Exception when pre job. pid: {self._p_id} jobId: {self._job_id},err: {err}")
            return False
        log.info(f"Pre job result is ret: {ret}. pid: {self._p_id} jobId: {self._job_id}")
        return ret

    def backup_thread(self, database, extend_info):
        """Back up a single database through the VDI tool (runs in a worker thread).

        On failure appends the database to self.fail_list; always records the
        latest backup meta info and bumps the shared progress counters.

        :param database: name of the database to back up.
        :param extend_info: shared ExtendInfo collecting per-database meta info.
        """
        log.info(f"Start to backup database: {database}")
        copy_path = os.path.join(self._data_path, f"{database}.bak")
        virtual_device_count = minimum_vdi_virtual_device(self.channel_number)
        cmd = (f"{ParamConstant.VDI_TOOL_PATH} {self.vdi_type} \"{database}\" \"{copy_path}\" \"{self.vdi_info}\""
               f" \"\" \"\" {virtual_device_count}")
        log.info(f"cmd:{cmd}")
        code, std_out, std_err = self.sql_utils.get_command_result(cmd)
        database_meta_info = self.sql_utils.get_latest_backup_meta_info(database, self.vdi_type)
        if code != ExecCmdResult.SUCCESS:
            log.error(f"Failed to backup database: {database}, out: {std_out}, err: {std_err}")
        else:
            log.info(f"Succeed to backup database: {database}")
        # extend_info, fail_list, the class-level counters and the progress
        # file are shared by all worker threads -- mutate them under one lock
        # so counts stay consistent and file writes do not interleave.
        with self._counter_lock:
            extend_info.meta_info.update({
                f"{database}": database_meta_info
            })
            if code != ExecCmdResult.SUCCESS:
                self.fail_list.append(database)
            # SUCCESS_NUM counts finished databases (failed ones included); it
            # only drives the progress report, fail_list decides the result.
            SubJobExecResult.SUCCESS_NUM += 1
            Progress.PROGRESS += Progress.INTERVAL_PROGRESS
            self.running_tail.log_info_param = [
                self._sub_job_id, f"{self.database_num - SubJobExecResult.SUCCESS_NUM}",
                f"{SubJobExecResult.SUCCESS_NUM}"
            ]
            self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, Progress.PROGRESS, self.running_tail,
                                        SQLServerProgressFileType.COMMON)

    def start_backup(self, extend_info):
        """Run one backup thread per database and finalize the copy metadata.

        :param extend_info: ExtendInfo to be filled with times, meta info and
                            UTC offset, then persisted to the copy-info file.
        :return: True when every database backed up successfully, else False.
        """
        log.info(f"Start to backup. pid: {self._p_id} jobId: {self._job_id}")
        try:
            # Guards the self.database_num == 0 case (empty group) among others.
            Progress.INTERVAL_PROGRESS = int(Progress.MAX_PROGRESS / self.database_num)
        except Exception as e:
            log.error(f'Exception when calculate the INTERVAL_PROGRESS as err: {str(e)}')
            return False
        threads = [Thread(target=self.backup_thread, args=(database, extend_info))
                   for database in self.database_list]
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            extend_info.begin_time = self.get_data_start_time(extend_info)
        else:
            extend_info.begin_time = int(time.time())
        for thr in threads:
            thr.start()
        for thr in threads:
            # join() on an already-finished thread is a no-op, so no
            # is_alive() guard is needed.
            thr.join()
        # Partial success is reported as a failure.
        if self.fail_list:
            log.error(f"Failed to backup part of databases: {self.fail_list}")
            return False
        extend_info.end_time = int(time.time())
        extend_info.backup_time = extend_info.end_time
        extend_info.offset_to_utc = get_offset_to_utc()
        self.write_tmp_file(extend_info.json(by_alias=True), SQLTmpFileName.COPY_INFO_FILE)
        # Persist the sqlite copy information.
        self.save_sqlite_info(self.database_list)
        log.info(f"End to backup. pid: {self._p_id} jobId: {self._job_id}")
        self.success_logtail.log_info_param = [self._sub_job_id, f"{self.database_num}"]
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value, Progress.PROGRESS_ONE_HUNDRED,
                                    self.success_logtail, SQLServerProgressFileType.COMMON)
        return True

    def backup(self):
        """
        Back up the availability group: prepare paths/credentials, collect
        meta info, then delegate to start_backup.

        :return: True on success, False otherwise.
        """
        log.info(f"Start to backup. pid: {self._p_id} jobId: {self._job_id}")
        self.prepare_user_infos()
        self.convert_vdi_type()
        if not self.prepare_path_param():
            log.error(f"Failed to get repositories info. pid: {self._p_id} jobId: {self._job_id}")
            return False
        self.database_list = self.sql_utils.get_always_on_database(self.group_name)
        self.database_num = len(self.database_list)
        # Record the backup progress before the backup starts.
        self.running_tail.log_info_param = [self._sub_job_id, f"{self.database_num}", "0"]
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, Progress.PROGRESS, self.running_tail,
                                    SQLServerProgressFileType.COMMON)
        # Query database-related files before backing up.
        self.save_meta_info(self.database_list, "meta.info", True)
        copy_extend_info = ExtendInfo()
        if self.backup_type == BackupTypeEnum.FULL_BACKUP.value:
            # Full backup additionally records node and replica information.
            copy_extend_info.database_list = self.database_list
            node_info = self.sql_utils.get_cluster_node_info(self.group_name)
            replica_info = self.sql_utils.get_availability_replicas(self.group_name)
            copy_extend_info.node_info = node_info
            copy_extend_info.replica_info = replica_info
            copy_extend_info.version = self.sql_utils.get_version()
        return self.start_backup(copy_extend_info)

    def exec_sub_job(self):
        """
        Entry point of the availability-group backup sub job.

        :return: True on success, False otherwise; failures are also written
                 to the progress file and the status file is updated.
        """
        try:
            ret = self.backup()
        except Exception as err:
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
            log.error(f"Exception when backup as err: {err}. pid: {self._p_id} jobId: {self._job_id}")
            return False
        self.update_status_file(ret)
        if not ret:
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, Progress.PROGRESS_ONE_HUNDRED,
                                        self.error_logtail, SQLServerProgressFileType.COMMON)
        log.info(f"Backup result is ret: {ret}. pid: {self._p_id} jobId: {self._job_id}")
        return ret

    def query_copy_info(self):
        """Return the copy info records for the current backup type."""
        return self.query_copy_infos(self.backup_type)
