#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import os
import json
from common import common
from common.common_models import JobPermissionInfo, ActionResult, Copy, LogDetail
from common.const import BackupTypeEnum, ExecuteResultEnum, SubJobStatusEnum, RepositoryDataTypeEnum, DBLogLevel
from common.logger import Logger
from common.number_const import NumberConst
from common.util.scanner_utils import scan_dir_size
from db2.backup.db2_backup_base import Db2BackupBase
from db2.backup.dpf.dpf_ts_backup_service import DpfTsBackupService
from db2.backup.util.db2_backup_util import Db2BackupUtil
from db2.comm.const import Db2CommonConst, Db2Const, Db2JsonConstant
from db2.comm.constant import ParamField
from db2.comm.db2_exception import ErrCodeException
from db2.comm.db2_verification import check_database_pending_status, is_trans_full_backup
from db2.comm.error_code import Db2ErrCode
from db2.comm.util.common_util import Db2CommonUtil
from db2.comm.util.dpf_util import DpfUtil
from db2.comm.util.job_decorators import job_exception_decorator, job_exception_output_decorator
from db2.comm.util.param_util import Db2ParamUtil

LOGGER = Logger().get_logger(filename="db2.log")


class DpfTsBackup(Db2BackupBase):
    """Executor for DB2 DPF (Database Partitioning Feature) tablespace-set backup jobs.

    Each public method implements one stage of the backup workflow driven by the
    backup framework: local-node pre-check, job permission query, pre job,
    backup sub jobs, copy reporting, post job, abort and progress reporting.
    """

    def __init__(self, task_name, pid, job_id, sub_job_id, param_dict):
        """Cache the parameters shared by every stage of the backup job.

        :param task_name: task name used to resolve job parameters
        :param pid: request id issued by the framework
        :param job_id: main job id
        :param sub_job_id: sub job id (empty for main-job level invocations)
        :param param_dict: raw parameter dictionary delivered by the framework
        """
        super().__init__(task_name, pid, job_id, sub_job_id, param_dict)
        self._os_user = Db2ParamUtil.get_os_user(self.task_name, self.pid)
        self._backup_type = Db2ParamUtil.get_backup_type(self.param_dict)
        self._db_name = Db2ParamUtil.get_db_name_for_ts(self.param_dict)
        # Tablespaces selected for this backup job
        self._tablespace_list = Db2ParamUtil.get_backup_table_spaces(self.param_dict)
        self.log_detail = LogDetail(logLevel=DBLogLevel.INFO.value)

    @job_exception_decorator()
    def allow_backup_in_local_node(self):
        """Check whether the local node is allowed to run this backup.

        Verifies, in order: OS user existence, DPF instance connectivity,
        log archiving enabled, track-modified-pages enabled, no pending
        database tasks, and that every requested tablespace exists and is
        in NORMAL state.

        :return: ActionResult with SUCCESS, or INTERNAL_ERROR for the first
                 failed check (body_err identifies the failure).
        """
        # The OS user must exist before any db2 command can be issued.
        if not self._os_user:
            LOGGER.error(f"Failed to get os username: {self._os_user}, job id: {self.job_id}.")
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value, message="The OS user does not exists",
                body_err=Db2ErrCode.OS_USER_NOT_EXISTS)
        # DPF instance connectivity check
        connect_status = Db2CommonUtil.check_process_service(self._os_user)
        if not connect_status:
            LOGGER.error(f"Check dpf instance connection result: {connect_status}.")
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value, message="The DPF cluster status is abnormal",
                body_err=Db2ErrCode.DB_SERVICE_ERROR)
        # Log archiving must be enabled on the database.
        log_arch_meth_val = Db2CommonUtil.get_log_arch_meth_val_of_db(self._os_user, self._db_name)
        if not log_arch_meth_val or log_arch_meth_val.upper() == "OFF":
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value,
                message="The DPF cluster database does not enable log archive method",
                body_err=Db2ErrCode.ARCHIVE_MODE_DISABLE)
        # Track-modified-pages (TRACKMOD) must be YES, otherwise incremental
        # backup is impossible.
        track_mod_val = Db2CommonUtil.get_track_mod_val_of_db(self._os_user, self._db_name)
        if not track_mod_val or track_mod_val.upper() != "YES":
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value,
                message="The DPF cluster database does not enable track modified pages",
                body_err=Db2ErrCode.INCREMENTAL_BACKUP_NOT_OPEN)
        # No pending task (Backup, Rollforward, Restore, Upgrade) may exist.
        if not check_database_pending_status(self._os_user, self._db_name):
            LOGGER.error("There are pending tasks in the DPF cluster database.")
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value,
                message="There are pending tasks in the DPF cluster database",
                body_err=Db2ErrCode.DATABASE_EXISTS_PENDING, err_params=[self._db_name])
        # Every requested tablespace must exist in the database.
        bak_ts_names = self._tablespace_list
        all_ts_dict = Db2CommonUtil.get_table_space_info_dict_of_db(self._os_user, self._db_name)
        if not bak_ts_names or not set(bak_ts_names).issubset(all_ts_dict.keys()):
            err_param = DpfUtil.build_table_space_not_exists_message(set(all_ts_dict.keys()), bak_ts_names)
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value,
                message="The table spaces of the DPF cluster database does not exists",
                body_err=Db2ErrCode.SPACE_NOT_EXISTS, err_params=[err_param])
        # Every requested tablespace must be in NORMAL state.
        # (Direct all() over a generator: avoids materializing a throwaway list.)
        if not all(all_ts_dict.get(name, {}).get("state") == Db2Const.NORMAL_STATE for name in bak_ts_names):
            LOGGER.error(f"There are tablespaces whose state is not normal in the backup tablespaces, "
                         f"database name: {self._db_name}.")
            err_param = DpfUtil.build_table_space_abnormal_message(all_ts_dict, bak_ts_names)
            return Db2CommonUtil.build_action_result(
                code=ExecuteResultEnum.INTERNAL_ERROR.value,
                message="There are tablespaces whose state is not normal in the backup tablespaces",
                body_err=Db2ErrCode.SPACE_STATUS_ABNORMAL, err_params=[err_param])
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    @job_exception_output_decorator
    def query_job_permission(self):
        """Report the OS user, group and file mode the job should run under.

        Writes a JobPermissionInfo result file (fileMode "0750") for the
        framework, or an error ActionResult when the OS user is unknown.
        """
        if not self._os_user:
            LOGGER.error(f"Failed to get os username: {self._os_user}, pid: {self.pid}.")
            Db2CommonUtil.output_action_result(
                pid=self.pid, code=ExecuteResultEnum.INTERNAL_ERROR.value,
                body_err=Db2ErrCode.OS_USER_NOT_EXISTS)
            return
        group_name = Db2CommonUtil.get_group_name_by_os_user(self._os_user)
        output = JobPermissionInfo(user=self._os_user, group=group_name, fileMode="0750")
        common.output_result_file(self.pid, output.dict(by_alias=True))

    @job_exception_decorator(write_progress=True)
    def exec_backup_pre_job(self):
        """Pre-backup stage: nothing to prepare, report 100% complete."""
        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    @job_exception_decorator(write_progress=True)
    def exec_backup(self):
        """Run one backup sub job on this node.

        Handles the mount-path sub task, skips non-catalog nodes, then runs a
        full/incremental/differential tablespace backup on the catalog node,
        reports the resulting copy info over RPC and reports final progress.

        :return: ActionResult with SUCCESS or INTERNAL_ERROR.
        """
        s_job_name = Db2ParamUtil.get_sub_job_name(self.param_dict)
        LOGGER.info(f"Start executing tablespaces backup subtask: {s_job_name}, pid: {self.pid}, "
                    f"job id: {self.job_id}, sub job id: {self.sub_job_id}")
        cache_path = Db2ParamUtil.get_repository_paths_for_backup(
            self.param_dict, RepositoryDataTypeEnum.CACHE_REPOSITORY.value)[0]
        # Mount-path sub task: ensures every node has mounted the repository
        # directories before the later sub tasks run.
        if s_job_name == "mount_path_subtask":
            return self.mount_path_sub_task()

        self.sub_job_detail.progress = NumberConst.FIFTEEN
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        # Resolve the catalog node IP; the backup runs only on that node.
        catalog_ip = Db2ParamUtil.get_catalog_ip_of_dpf_db(self.param_dict)
        if not catalog_ip:
            LOGGER.error("The catalog IP param is empty.")
            self.sub_job_detail.task_status = SubJobStatusEnum.FAILED.value
            Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
            return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.INTERNAL_ERROR.value,
                                                     message="The catalog IP param is empty")
        # Non-catalog nodes do not perform the backup.
        if not DpfUtil.is_catalog_node(catalog_ip):
            return self.not_backup()
        bak_ts_names = self._tablespace_list
        data_path = Db2ParamUtil.get_repository_paths_for_backup(
            self.param_dict, RepositoryDataTypeEnum.DATA_REPOSITORY.value)[0]
        backup_dir = ""
        backup_dir_prefix = Db2CommonConst.BACKUP_TYPE_DIR_PREFIX_MAP.get(self._backup_type)
        if backup_dir_prefix:
            backup_dir = os.path.realpath(os.path.join(data_path, f'{backup_dir_prefix}_{self.job_id}'))
            DpfTsBackupService.create_backup_dir(self._os_user, backup_dir)
        # Dispatch on backup type; each service call returns the db2 backup
        # image timestamp used to build the copy info.
        if self._backup_type == BackupTypeEnum.FULL_BACKUP.value:
            bak_time = DpfTsBackupService.exec_full_backup_ts(self._os_user, self._db_name, bak_ts_names, backup_dir)
        elif self._backup_type == BackupTypeEnum.INCRE_BACKUP.value:
            bak_time = DpfTsBackupService.exec_incr_backup_ts(self._os_user, self._db_name, bak_ts_names, backup_dir)
        elif self._backup_type == BackupTypeEnum.DIFF_BACKUP.value:
            bak_time = DpfTsBackupService.exec_diff_backup_ts(self._os_user, self._db_name, bak_ts_names, backup_dir)
        else:
            LOGGER.error(f"Unsupported backup type: {self._backup_type}.")
            self.sub_job_detail.task_status = SubJobStatusEnum.FAILED.value
            Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
            return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.INTERNAL_ERROR.value,
                                                     message="Unsupported backup type")
        copy_info = self.build_copy_info(bak_time, self._os_user, backup_dir)
        Db2BackupUtil.report_backup_copy_info_by_rpc_tool(self.job_id, copy_info, cache_path)

        # Copy size is informational only: a scan failure must not fail the job.
        try:
            self.sub_job_detail.data_size = self.get_copy_size(backup_dir)
        except Exception as ex:
            LOGGER.exception(f"Can not get copy size, error:{ex}")

        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)

        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def mount_path_sub_task(self):
        """Complete the mount-path sub task (mounting is done by the framework)."""
        self.sub_job_detail.progress = NumberConst.TEN
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def not_backup(self):
        """Finish the sub job successfully on a non-catalog node (no backup)."""
        LOGGER.info("Current node is no catalog node, no need backup.")
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def get_copy_size(self, copy_path):
        """Return the size of the backup copy directory, or ZERO on scan failure.

        :param copy_path: directory of the backup copy to measure
        :return: scanned size on success, NumberConst.ZERO on failure
        """
        ret, size = scan_dir_size(self.job_id, copy_path)
        if ret:
            LOGGER.info(f"DB2 single backup copy size : {size}")
            return size
        LOGGER.error(f"Get DB2 single backup copy size file job_id:{self.job_id}")
        return NumberConst.ZERO

    def build_copy_info(self, bak_time, user_name, backup_path):
        """Build the Copy object describing the finished backup.

        :param bak_time: db2 backup image timestamp string returned by the backup
        :param user_name: DB2 instance OS user used to query tablespace/table info
        :param backup_path: backup directory (kept for interface compatibility;
                            not read here — repositories are resolved from params)
        :raises ErrCodeException: when bak_time cannot be converted to a timestamp
        :return: populated Copy object
        """
        copy_info = Copy(id=self.job_id)
        bak_timestamp = Db2CommonUtil.convert_backup_time_to_timestamp(bak_time)
        if not bak_timestamp:
            LOGGER.error(f'Empty back timestamp, job id: {self.job_id}.')
            raise ErrCodeException(Db2ErrCode.ERROR_COMMON_INVALID_PARAMETER)
        copy_info.timestamp = bak_timestamp
        copy_info.extend_info = {Db2JsonConstant.BAK_IMG_TIMESTAMP: bak_time}
        # Mark the copy as eligible for copy verification.
        copy_info.extend_info[Db2JsonConstant.COPY_VERIFY_FILE] = "true"
        # Fix: use the caller-supplied user_name directly. The original re-fetched
        # the OS user here, silently shadowing the parameter with an identical value.
        tablespace_info = Db2CommonUtil.get_tablespace_info(user_name, self._db_name, self._tablespace_list)
        table_info = Db2CommonUtil.get_table_info(user_name, self._db_name, self._tablespace_list)
        copy_info.extend_info[ParamField.TABLESPACE_INFO] = json.dumps(tablespace_info)
        copy_info.extend_info[ParamField.TABLE_INFO] = json.dumps(table_info)
        copy_info.extend_info[Db2JsonConstant.COPY_ID] = self.job_id
        copy_info.repositories = DpfTsBackupService.query_copy_repos_after_backup(
            self.job_id, self.param_dict, self._backup_type)
        return copy_info

    @job_exception_decorator(write_progress=True)
    def exec_backup_post_job(self):
        """Post-backup stage: nothing to clean up, report 100% complete."""
        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def query_backup_copy(self):
        """Copy info is reported during exec_backup; nothing to query here."""
        pass

    @job_exception_decorator(write_progress=True)
    def abort_job(self):
        """Abort the running backup: kill the agent job process and the db2 backup."""
        self.sub_job_detail.progress = NumberConst.FIVE
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        Db2CommonUtil.kill_agent_job_process(self.pid, self.job_id)
        DpfTsBackupService.abort_ts_backup_job(self._os_user, self._db_name)
        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        Db2CommonUtil.proactively_report_progress(self.pid, self.sub_job_detail)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def check_backup_job_type(self):
        """Check whether an incremental/differential backup must become a full one.

        :return: SUCCESS for full backups or when a base full copy exists;
                 INTERNAL_ERROR with ERR_INC_TO_FULL when conversion is required.
        """
        LOGGER.info(f"Check backup job type: {self._backup_type}.")
        if self._backup_type == BackupTypeEnum.FULL_BACKUP.value:
            return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)
        data_repo_path = Db2ParamUtil.get_repository_paths_for_backup(
            self.param_dict, RepositoryDataTypeEnum.DATA_REPOSITORY.value)[0]
        # An incremental/differential backup without a prior full copy must be
        # converted to a full backup.
        if is_trans_full_backup(self._os_user, data_repo_path):
            LOGGER.info(f"When performing a incremental or differential backup, there is no full backup copy, "
                        f"and it will be converted to a full backup, backup type: {self._backup_type}.")
            return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.INTERNAL_ERROR.value,
                                                     body_err=Db2ErrCode.ERR_INC_TO_FULL)
        return Db2CommonUtil.build_action_result(code=ExecuteResultEnum.SUCCESS.value)

    def report_progress(self):
        """Report progress for the sub job when present, otherwise the main job."""
        cache_path = Db2ParamUtil.get_repository_paths_for_backup(
            self.param_dict, RepositoryDataTypeEnum.CACHE_REPOSITORY.value)[0]
        if self.sub_job_id:
            Db2BackupUtil.report_progress_utl(self.pid, self.sub_job_id, cache_path)
        else:
            Db2BackupUtil.report_progress_utl(self.pid, self.job_id, cache_path)

    def output_action_result(self, action_ret: ActionResult):
        """Delegate result output to the base class implementation."""
        super().output_action_result(action_ret)
