#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import re
import shutil
import time

from common.common import is_clone_file_system
from common.const import CopyDataTypeEnum
from common import common
from common.logger import Logger
from common.number_const import NumberConst
from db2.comm.const import Db2Const, Db2CmdFormat, Db2Regex
from db2.comm.constant import ParamField
from db2.comm.util.common_util import Db2CommonUtil
from db2.comm.util.dpf_util import DpfUtil
from db2.comm.util.param_util import Db2ParamUtil
from db2.restore.dpf.dpf_db_restore_service import DpfDbRestoreService
from db2.restore.hadr.hadr_parse_restore_params import HadrParseRestoreParams
from db2.restore.hadr.hadr_restore_service import HadrRestoreService
from db2.restore.single.single_db_restore_service import SingleRestoreService

# Module-level logger; all DB2 plugin components write to the shared "db2.log" file.
LOGGER = Logger().get_logger(filename="db2.log")


class HadrDbRestoreService(HadrRestoreService):
    """DB2 HADR database restore service.

    Orchestrates full / incremental / differential data-copy restores and
    log-copy restores for both the primary and the standby node of a DB2
    HADR pair. Standby-side log restores copy extracted archive logs into
    the database's active log directory; primary/standby coordination is
    done through flag files on a shared cache mount.
    """

    def __init__(self, parse_params_obj: HadrParseRestoreParams):
        super().__init__(parse_params_obj)

    @staticmethod
    def build_restore_dbpath_param(db_install_path):
        """Build the optional "to <dbpath>" clause of a db2 restore command.

        :param db_install_path: target database installation directory, may be empty
        :return: " to <db_install_path>" when a path is given, otherwise ""
        """
        return f" to {db_install_path}" if db_install_path else ""

    @staticmethod
    def cp_logs_to_active_log_dir(src_log_dir, active_log_dir, user_id, group_id, needed_logs=None):
        """Recursively copy DB2 archive log files into the active log directory.

        Only file names matching ``Db2Regex.LOG_NAME_REG`` are considered; an
        already-existing target file is removed first, and ownership of the
        copied file is set to (user_id, group_id).

        :param src_log_dir: directory tree scanned for archive logs
        :param active_log_dir: destination active log directory
        :param user_id: numeric uid for the copied files
        :param group_id: numeric gid for the copied files
        :param needed_logs: optional collection restricting which names are copied
        :return: set of file names actually copied
        """
        cp_logs = set()
        for root, _, files in os.walk(src_log_dir):
            for f_name in files:
                if not re.match(Db2Regex.LOG_NAME_REG, f_name):
                    continue
                # When a copy scope is specified, skip logs outside of it.
                if needed_logs and f_name not in needed_logs:
                    continue
                tmp_log_file = os.path.realpath(os.path.join(root, f_name))
                tmp_tgt_log_file = os.path.realpath(os.path.join(active_log_dir, f_name))
                if os.path.exists(tmp_tgt_log_file):
                    LOGGER.info(f"The target active log: {tmp_tgt_log_file} already exists, need remove.")
                    os.remove(tmp_tgt_log_file)
                shutil.copy(tmp_log_file, active_log_dir)
                os.lchown(tmp_tgt_log_file, user_id, group_id)
                cp_logs.add(f_name)
                LOGGER.info(f"Copy archive log: {tmp_log_file} to active log directory: {active_log_dir} success.")
        return cp_logs

    def exec_db_full_restore(self):
        """Restore the database from a full data copy.

        :return: True on success, False when the restore command fails
        """
        logtarget = self.get_logtarget_bind_path()
        self.delete_db_log()
        copy_all = self.parse_param.get_copies()
        full_copy_path = Db2ParamUtil.get_full_copy_path_when_restore(self.parse_param.param_dict)
        full_copy = copy_all[0]
        copy_type = full_copy.get(ParamField.TYPE, "")
        # Archive copies carry the real copy attributes in their extendInfo.
        if copy_type in (CopyDataTypeEnum.TAP_ARCHIVE.value, CopyDataTypeEnum.S3_ARCHIVE.value):
            full_copy = full_copy.get(ParamField.EXTEND_INFO, {})
        db_name = self.parse_param.get_db_name_from_copy(copy_all[0])
        tmp_copy_bak_time = self.parse_param.get_timestamp_from_copy(full_copy)
        # Restoring to a new location when the copy was produced on another agent.
        is_new_loc = self.parse_param.get_uuid_from_copy(full_copy) != self.agent_id
        if is_clone_file_system(self.parse_param.param_dict):
            Db2CommonUtil.modify_path_permissions(full_copy_path)
        log_target_cmd = f"logtarget {logtarget} "
        if is_new_loc:
            db_install_path = self.parse_param.get_local_db_directory_from_database_node(self.local_database_node)
            db_path_cmd = HadrDbRestoreService.build_restore_dbpath_param(db_install_path)
            restore_cmd = Db2CmdFormat.DB_RESTORE_NEW.format(self.os_user, db_name, "", full_copy_path,
                                                             tmp_copy_bak_time, db_path_cmd, self.db_name,
                                                             log_target_cmd)
        else:
            restore_cmd = Db2CmdFormat.DB_RESTORE.format(self.os_user, db_name, "", full_copy_path, tmp_copy_bak_time,
                                                         self.db_name, log_target_cmd)
        try:
            SingleRestoreService.execute_restore_db(restore_cmd)
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)
            return False
        LOGGER.info("Execute full restore completed.")
        return True

    def exec_db_incr_restore(self):
        """Restore the database from a full copy plus incremental copies.

        :return: True on success, False when any restore command fails
        """
        logtarget = self.get_logtarget_bind_path()
        self.delete_db_log()
        return self._exec_db_chain_restore(logtarget, "incr")

    def exec_db_diff_restore(self):
        """Restore the database from a full copy plus differential copies.

        :return: True on success, False when any restore command fails
        """
        self.delete_db_log()
        logtarget = self.get_logtarget_bind_path()
        return self._exec_db_chain_restore(logtarget, "diff")

    def _exec_db_chain_restore(self, logtarget, restore_label):
        """Shared body of the incremental and differential restore flows.

        Manual DB2 incremental restore order is: last delta -> full ->
        every delta from first to last, hence the last copy path is
        prepended to the list before iterating.

        :param logtarget: directory bound for log extraction
        :param restore_label: tag for the completion log ("incr" or "diff")
        :return: True on success, False when any restore command fails
        """
        full_copy = self.parse_param.get_copies()[0]
        db_name = self.parse_param.get_db_name_from_copy(full_copy)
        path = Db2ParamUtil.get_copy_path_info(self.parse_param.param_dict)
        # Restoring to a new location when the copy was produced on another agent.
        is_new_loc = self.parse_param.get_uuid_from_copy(full_copy) != self.agent_id
        db_install_path = ""
        if is_new_loc:
            db_install_path = self.parse_param.get_local_db_directory_from_database_node(self.local_database_node)
        db_path_cmd = HadrDbRestoreService.build_restore_dbpath_param(db_install_path)
        # Manual restore order: last delta -> full -> deltas first to last.
        path.insert(0, path[-1])
        restore_num = len(path)
        for idx, tmp_restore_copy in enumerate(path):
            # Logs are only extracted during the final restore step.
            log_target_cmd = f"logtarget {logtarget} " if idx == (restore_num - 1) else ""
            copy_path = self.parse_param.get_data_path(tmp_restore_copy)
            if is_clone_file_system(self.parse_param.param_dict):
                Db2CommonUtil.modify_path_permissions(copy_path)
            tmp_copy_timestamp = self.parse_param.get_restore_time(tmp_restore_copy)
            if is_new_loc:
                restore_cmd = Db2CmdFormat.DB_RESTORE_NEW.format(self.os_user, db_name, Db2Const.INCREMENTAL,
                                                                 copy_path, tmp_copy_timestamp, db_path_cmd,
                                                                 self.db_name, log_target_cmd)
            else:
                restore_cmd = Db2CmdFormat.DB_RESTORE.format(self.os_user, db_name, Db2Const.INCREMENTAL,
                                                             copy_path, tmp_copy_timestamp, self.db_name,
                                                             log_target_cmd)
            try:
                SingleRestoreService.execute_restore_db(restore_cmd)
            except Exception as exception_str:
                LOGGER.exception(exception_str, exc_info=True)
                return False
        LOGGER.info(f"Execute {restore_label} restore completed.")
        return True

    def cp_extracted_logs_after_data_copy_restore_on_standby(self):
        """On the standby node, copy the logs extracted during the data-copy
        restore into the database's active log directory. No-op on the primary."""
        if self.check_node_is_primary():
            LOGGER.info("The primary node does not need to copy extracted logs.")
            return
        # Resolve the standby database's active log directory.
        active_log_dir = Db2CommonUtil.get_log_path_val_of_db(self.os_user, self.db_name)
        if not os.path.isdir(active_log_dir):
            LOGGER.warning(f"The active log dir: {active_log_dir} of database: {self.db_name} is invalid.")
            return
        user_id = Db2CommonUtil.get_os_user_id_by_os_user(self.os_user)
        group_id = Db2CommonUtil.get_group_id_by_os_user(self.os_user)
        log_tar_dir = self.get_logtarget_bind_path()
        HadrDbRestoreService.cp_logs_to_active_log_dir(log_tar_dir, active_log_dir, user_id, group_id)
        LOGGER.info("Copy extracted logs success after data copy restore on standby node.")

    def exec_log_restore(self):
        """Perform a log-copy restore on the current node.

        On the primary this delegates to exec_log_restore_primary(). On a
        standby, after restoring the full data copy, the logs required
        before starting HADR (full-copy extracted logs + the primary's
        post-rollforward non-active logs + every log number in between)
        are copied into the active log directory.

        :return: True on success, False on any failure
        """
        # Primary node: log copy restore.
        if self.check_node_is_primary():
            if not self.exec_log_restore_primary():
                LOGGER.error("Failed to execute the database primary restore(log) command.")
                return False
            return True
        # Standby node: log copy restore.
        if not self.exec_db_full_restore():
            LOGGER.error("Failed to execute the database standby restore(log) command.")
            return False
        # The standby must wait until the primary finished its rollforward
        # before copying any logs.
        if not self.check_primary_rwf_status():
            LOGGER.error("Check primary rollforward failed on standby node.")
            return False
        # Resolve the standby database's active log directory.
        active_log_dir = Db2CommonUtil.get_log_path_val_of_db(self.os_user, self.db_name)
        if not os.path.isdir(active_log_dir):
            LOGGER.error(f"The active log dir: {active_log_dir} of database: {self.db_name} is invalid.")
            return False
        user_id = Db2CommonUtil.get_os_user_id_by_os_user(self.os_user)
        group_id = Db2CommonUtil.get_group_id_by_os_user(self.os_user)
        # (1) Non-shared directory: copy only the logs extracted from the full copy.
        log_tar_dir = self.get_logtarget_bind_path()
        extract_logs = HadrDbRestoreService.cp_logs_to_active_log_dir(
            log_tar_dir, active_log_dir, user_id, group_id)
        # (2) Copy the primary's post-rollforward non-active logs.
        mnt_cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(self.parse_param.param_dict)
        tmp_pri_log_dir = os.path.realpath(os.path.join(mnt_cache_path, Db2Const.TMP_PRI_LOG_DIR))
        pri_logs = HadrDbRestoreService.cp_logs_to_active_log_dir(
            tmp_pri_log_dir, active_log_dir, user_id, group_id)
        LOGGER.info(f"The extracted logs: {extract_logs}, primary non active log: {pri_logs}")
        merged_logs = extract_logs | pri_logs
        # Extract the numeric sequence of each copied log, matching each name once.
        log_num_pattern = re.compile(Db2Regex.EXTRACT_LOG_NUM_REG)
        merged_nums = []
        for log_name in merged_logs:
            mat_ret = log_num_pattern.match(str(log_name))
            if mat_ret:
                merged_nums.append(int(mat_ret.groups()[0]))
        if not merged_nums:
            # Without at least one sequence number min()/max() below would raise.
            LOGGER.error("No valid log sequence numbers found, execute standby log restore failed.")
            return False
        all_logs = [f'S{str(i).rjust(7, "0")}.LOG' for i in range(min(merged_nums), max(merged_nums) + 1)]
        # (3) Copy every log between the two sets above from the log copies.
        other_logs = set(all_logs).difference(merged_logs)
        if not other_logs:
            LOGGER.info("No need copy logs of copies, execute standby log restore completed.")
            return True
        LOGGER.info(f"Need copy logs of copies to active log directory, needed logs: {other_logs}.")
        log_copy_paths = Db2ParamUtil.get_log_copy_paths_for_hadr(self.parse_param.param_dict, self.parse_param.job_id)
        cal_tmp_logs = set()
        for tmp_log_copy_path in log_copy_paths:
            tmp_logs = HadrDbRestoreService.cp_logs_to_active_log_dir(
                tmp_log_copy_path, active_log_dir, user_id, group_id, needed_logs=other_logs)
            cal_tmp_logs = cal_tmp_logs | tmp_logs
        LOGGER.info(f"Copy logs from copy to active log directory completed, needed logs: {other_logs}, "
                    f"copied logs: {cal_tmp_logs}")
        if cal_tmp_logs != other_logs:
            LOGGER.error("Copy logs of copies to active log directory failed, execute standby log restore failed.")
            return False
        LOGGER.info("Copy archive log to active log directory success, execute standby log restore completed.")
        return True

    def exec_log_restore_primary(self):
        """Build and run the primary-node log restore (restore + rollforward).

        :return: True on success, False on failure
        :raises Exception: when no log copy paths are available
        """
        self.delete_db_log()
        copies = Db2ParamUtil.parse_copies(self.parse_param.param_dict)
        log_copy_paths = Db2ParamUtil.get_log_copy_paths_for_hadr(self.parse_param.param_dict, self.parse_param.job_id)
        if not log_copy_paths:
            LOGGER.error("The list of log copy paths is empty.")
            raise Exception("The list of log copy paths is empty")
        full_copy_path = Db2ParamUtil.get_full_copy_path_when_restore(self.parse_param.param_dict)
        if is_clone_file_system(self.parse_param.param_dict):
            Db2CommonUtil.modify_path_permissions(full_copy_path)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        copy_db_name = self.parse_param.get_db_name_from_copy(full_copy)
        full_bak_time = Db2ParamUtil.handle_backup_image_timestamp_of_copy(full_copy)
        full_bak_end_time = Db2ParamUtil.get_backup_end_time_of_copy(full_copy)
        log_tar_dir = self.get_logtarget_bind_path()
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        Db2CommonUtil.disconnect_db_application(self.os_user, self.db_name)
        if Db2ParamUtil.is_new_location_restore(self.parse_param.param_dict):
            db_install_path = Db2CommonUtil.get_db_info_dict(self.os_user).get(self.db_name, {}). \
                get(ParamField.LOCAL_DATABASE_DIRECTORY, "")
            db_path_cmd = HadrDbRestoreService.build_restore_dbpath_param(db_install_path)
            full_restore_db_cmd = f'su - {self.os_user} -c "db2 restore db {copy_db_name} ' \
                                  f'from {full_copy_path} ' \
                                  f'taken at {full_bak_time}{db_path_cmd} ' \
                                  f'into {self.db_name} ' \
                                  f'logtarget {log_tar_dir} ' \
                                  f'replace existing without prompting"'
        else:
            full_restore_db_cmd = f'su - {self.os_user} -c "db2 restore db {self.db_name} ' \
                                  f'from {full_copy_path} ' \
                                  f'taken at {full_bak_time} ' \
                                  f'logtarget {log_tar_dir} ' \
                                  f'without prompting"'
        LOGGER.info("Build restore command for exec_log_restore_primary success.")
        return self.start_exec_rollcmd(full_restore_db_cmd, log_copy_paths, log_tar_dir, full_bak_end_time)

    def start_exec_rollcmd(self, full_restore_db_cmd, log_copy_paths, log_tar_dir, full_bak_end_time):
        """Execute the restore + rollforward sequence with one retry.

        On the second attempt the rollforward target is re-derived from the
        current rollforward status: when it does not pass the full backup's
        end time, roll forward "to end of backup" instead of to the PITR time.

        :param full_restore_db_cmd: prepared db2 restore command
        :param log_copy_paths: log copy directories to feed into the overflow log path
        :param log_tar_dir: directory holding the extracted logs
        :param full_bak_end_time: end time (seconds) of the full backup, may be falsy
        :return: True when restore and rollforward both succeed, else False
        """
        restore_timestamp = Db2ParamUtil.get_restore_timestamp(self.parse_param.param_dict)
        restore_time = Db2CommonUtil.convert_pitr_timestamp_to_local_time(restore_timestamp)
        uid = Db2CommonUtil.get_os_user_id_by_os_user(self.os_user)
        gid = Db2CommonUtil.get_group_id_by_os_user(self.os_user)
        if is_clone_file_system(self.parse_param.param_dict):
            for tmp_log_path in log_copy_paths:
                _, _, _ = common.execute_cmd(f"chown -R -h {uid}:{gid} {tmp_log_path}")
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        # Note: the parentheses around the overflow log path are escaped for the shell.
        rollforward_db_cmd = f'su - {self.os_user} -c "db2 rollforward database {self.db_name} to {restore_time} ' \
                             f'using local time and complete overflow log path \\({log_tar_dir}\\) noretrieve"'
        # Retry loop: at most two attempts.
        complete_flag = False
        retry_nums = 0
        while retry_nums < 2:
            common.clean_dir(log_tar_dir)
            # Clear the archive directory before restoring.
            self.clear_archive_log_dir()
            retry_nums += 1
            if retry_nums == 2:
                restore_time = Db2CommonUtil.get_roll_status_time(self.os_user, self.db_name)
                if full_bak_end_time \
                        and DpfDbRestoreService.parse_time_hold_to_sec(restore_time) <= full_bak_end_time:
                    rollforward_db_cmd = f'su - {self.os_user} -c "db2 rollforward database {self.db_name} ' \
                                         f'to end of backup and complete overflow log path \\({log_tar_dir}\\)"'
                else:
                    rollforward_db_cmd = f'su - {self.os_user} -c "db2 rollforward database {self.db_name} ' \
                                         f'to {restore_time} using local time and ' \
                                         f'complete overflow log path \\({log_tar_dir}\\) noretrieve"'
                # Clear the active log directory before the retry.
                self.delete_db_log()
            LOGGER.info(f"Start try restore. Retry nums: {retry_nums}")
            # Run the restore.
            try:
                SingleRestoreService.execute_restore_db(full_restore_db_cmd)
            except Exception:
                LOGGER.exception(f"Retry restore failed. retry num: {retry_nums}")
                continue
            # Clear the archive directory before rolling forward.
            self.clear_archive_log_dir()
            # Copy each log copy into the full-copy extracted-log directory.
            for tmp_copy_path in log_copy_paths:
                DpfDbRestoreService.backup_arch_log_to_target_dir(self.os_user, tmp_copy_path, log_tar_dir)
            # Run the rollforward.
            try:
                SingleRestoreService.execute_rollforward_db(rollforward_db_cmd)
            except Exception:
                LOGGER.exception(f"Retry rollforward failed. retry num: {retry_nums}")
                continue
            complete_flag = True
            break
        if not complete_flag:
            self.write_primary_node_rollforward_flag(False)
            return False
        LOGGER.info("Log restore success.")
        # Back up the primary's non-active logs before publishing the flag.
        self.backup_primary_non_active_logs_after_rfw()
        self.write_primary_node_rollforward_flag(True)
        return True

    def clear_archive_log_dir(self):
        """Remove the database's archive log directory contents when the
        archive method points at a directory."""
        log_arch_meth_val = Db2CommonUtil.get_log_arch_meth_val_of_db(self.os_user, self.db_name)
        if Db2CommonUtil.check_log_arch_meth_is_dir(log_arch_meth_val):
            arch_log_path = os.path.realpath(os.path.join(log_arch_meth_val, self.instance_name, self.db_name))
            Db2CommonUtil.remove_file_if_exists(arch_log_path)
            Db2CommonUtil.clear_dir_if_exists(arch_log_path)

    def write_primary_node_rollforward_flag(self, is_rollforwarded):
        """Write the primary node's rollforward result as a flag file on the
        shared cache mount so the standby can observe it.

        :param is_rollforwarded: True when rollforward completed successfully
        """
        job_id = Db2ParamUtil.get_job_id(self.parse_param.param_dict)
        mnt_cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(self.parse_param.param_dict)
        LOGGER.info(f"Writing primary node rollforward info, job id: {job_id}, is rollforwarded: {is_rollforwarded}.")
        pri_rfw_f_name = f"{job_id}_{Db2Const.HADR_PRI_ROLLFORWARD_TAG}_{is_rollforwarded}"
        pri_rfw_f_path = os.path.realpath(os.path.join(mnt_cache_path, pri_rfw_f_name))
        common.touch_file(pri_rfw_f_path)
        LOGGER.info(f"Write primary node rollforward info success, job id: {job_id}.")

    def check_primary_rwf_status(self):
        """On the standby node, poll the shared cache mount until the primary's
        rollforward flag file appears or the timeout elapses.

        :return: True when the primary reported a successful rollforward,
                 False on a failure flag or on timeout
        """
        job_id = Db2ParamUtil.get_job_id(self.parse_param.param_dict)
        LOGGER.info(f"Start checking primary rollforward status on standby node, job id: {job_id}.")
        mnt_cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(self.parse_param.param_dict)
        cur_time = int(time.time())
        timeout_time = cur_time + Db2Const.HADR_CHK_PRI_RFW_TIMEOUT
        # The trailing tag captures the True/False result written by the primary.
        log_integrity_reg = f"{job_id}_{Db2Const.HADR_PRI_ROLLFORWARD_TAG}_{Db2Regex.PRI_RFW_FLAG_END_TAG}"
        while int(time.time()) <= timeout_time:
            for f_name in os.listdir(mnt_cache_path):
                mat_ret = re.search(log_integrity_reg, f_name)
                if not mat_ret:
                    continue
                is_completed = (mat_ret.groups()[0] == "True")
                if not is_completed:
                    LOGGER.error("Check primary node rollforward status on standby node failed.")
                    return False
                LOGGER.info("Check primary node rollforward status on standby node completed.")
                return True
            LOGGER.debug("Check primary node rollforward status on standby node running ...")
            time.sleep(NumberConst.TEN)
        LOGGER.warning(f"Check primary node rollforward status on standby node timeout, job id: {job_id}.")
        return False

    def backup_primary_non_active_logs_after_rfw(self):
        """After the primary's rollforward, back up its non-active logs into a
        temporary directory on the shared cache mount for the standby to fetch."""
        active_log_dir = Db2CommonUtil.get_log_path_val_of_db(self.os_user, self.db_name)
        if not os.path.isdir(active_log_dir):
            LOGGER.error(f"The active log dir: {active_log_dir} of database: {self.db_name} is invalid.")
            return
        fir_active_log = Db2CommonUtil.get_first_active_log_file_of_db(self.os_user, self.db_name)
        if not re.match(Db2Regex.LOG_NAME_REG, fir_active_log):
            LOGGER.error(f"The first active log file: {fir_active_log} of database: {self.db_name} is invalid.")
            return
        user_id = Db2CommonUtil.get_os_user_id_by_os_user(self.os_user)
        group_id = Db2CommonUtil.get_group_id_by_os_user(self.os_user)
        mnt_cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(self.parse_param.param_dict)
        tmp_log_dir = DpfUtil.create_tmp_primary_log_dir(mnt_cache_path, self.os_user)
        for f_name in os.listdir(active_log_dir):
            if not re.match(Db2Regex.LOG_NAME_REG, f_name):
                continue
            # Only back up non-active logs; names >= "First active log file" are skipped.
            if str(f_name) >= str(fir_active_log):
                continue
            tmp_log_file = os.path.realpath(os.path.join(active_log_dir, f_name))
            tmp_tgt_log_file = os.path.realpath(os.path.join(tmp_log_dir, f_name))
            if os.path.exists(tmp_tgt_log_file):
                LOGGER.info(f"The target temp primary log: {tmp_tgt_log_file} already exists, need remove.")
                os.remove(tmp_tgt_log_file)
            shutil.copy(tmp_log_file, tmp_log_dir)
            os.lchown(tmp_tgt_log_file, user_id, group_id)
            # Fixed: original message logged the source (active) directory here.
            LOGGER.info(f"Copy archive log: {tmp_log_file} to temp primary log directory: {tmp_log_dir} success.")

    def exec_db_rollforward(self):
        """Roll the restored database forward using the extracted logs.

        :return: True on success, False when the rollforward command fails
        """
        logtarget = self.get_logtarget_bind_path()
        roll_cmd = Db2CmdFormat.DB_ROLL.format(self.os_user, self.db_name, logtarget)
        try:
            SingleRestoreService.execute_rollforward_db(roll_cmd)
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)
            return False
        return True
