#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import configparser
import functools
import os
import itertools
from datetime import datetime, timezone

from common.logger import Logger
from saphanay.resource.saphana import SAPHANA
from saphanay.comm.constants import SAPHANADirectoryStructure
from saphanay.comm.error_code_exception import ErrorCodeException
from saphanay.comm.constants import SAPHANAErrorCode
from saphanay.comm.common_utils import remove_paths

logger = Logger().get_logger()


class LogBackupSAPHANA(SAPHANA):

    """Log backup capabilities for SAP HANA."""

    def __init__(self, sid: str, db_name: str, db_port: str, db_user: str, db_pwd: str) -> None:
        super().__init__(sid, db_name, db_port, db_user, db_pwd)
        # Per-instance caches. The original used @functools.cache on the
        # methods, which keys on `self` and keeps every instance alive for
        # the lifetime of the process (ruff B019).
        self._cached_log_backup_path: str | None = None
        self._cached_log_files: tuple[list[str], list[str]] | None = None

    def read_global_initialization(self, section: str, option: str) -> str | None:
        """
        Read an option from the SYSTEM-level global.ini.
        Returns None when the file, section or option does not exist.
        :raise ErrorCodeException:
        """
        config = configparser.ConfigParser()
        file = SAPHANADirectoryStructure.GLOBAL_INI_FILE.format(SID=self._sid.upper())
        # configparser.read() silently skips unreadable files and returns the
        # list of files actually parsed — surface that case explicitly.
        if not config.read(file):
            logger.warning(f"Failed to read global.ini. {file}")
            return None
        if section not in config:
            logger.warning(f"Section {section} not in global.ini. {file}")
            return None
        if option not in config[section]:
            logger.warning(f"Option {option} not in global.ini {section}. {file}")
            return None
        return config[section][option]

    def get_log_backup_path(self) -> str:
        """
        Get the log backup path of the database (cached per instance).
        :raise ErrorCodeException: when basepath_logbackup cannot be read.
        """
        if self._cached_log_backup_path is not None:
            return self._cached_log_backup_path
        base_path = self.read_global_initialization("persistence", "basepath_logbackup")
        if not base_path:
            logger.error("Failed to read basepath_logbackup.")
            raise ErrorCodeException(SAPHANAErrorCode.GET_LOG_BACKUP_PATH_FAIL)
        # SYSTEMDB logs live directly under "SYSTEMDB"; tenant DBs under "DB_<NAME>".
        db_dir = "SYSTEMDB" if self._db_name.upper() == "SYSTEMDB" else f"DB_{self._db_name.upper()}"
        log_backup_path = os.path.join(base_path, db_dir)
        logger.info(f"Log backup path: {log_backup_path}")
        self._cached_log_backup_path = log_backup_path
        return log_backup_path

    def list_log_files(self) -> tuple[list[str], list[str]]:
        """
        List all existing log backup file names, split into logs and catalogs.
        The directory listing is snapshotted per instance (cached), matching
        the original @functools.cache behavior.
        :return: logs, catalogs
        :raise ErrorCodeException:
        """
        if self._cached_log_files is not None:
            return self._cached_log_files
        log_backup_path = self.get_log_backup_path()
        logs, catalogs = [], []
        for name in os.listdir(log_backup_path):
            # Skip files that are not log backups or carry no parseable backup id.
            if not name.startswith("log_backup_") or not self._get_backup_id_of_log_file(name):
                continue
            # Catalog files are named log_backup_0_0_0_0.<backup id>.
            if name.startswith("log_backup_0_0_0_0"):
                catalogs.append(name)
            else:
                logs.append(name)
        logger.debug(f"List {len(logs)} logs. {log_backup_path} {logs}")
        logger.debug(f"List {len(catalogs)} catalogs. {log_backup_path} {catalogs}")
        self._cached_log_files = (logs, catalogs)
        return self._cached_log_files

    def list_log_file_backup_ids(self) -> tuple[list[int], list[int]]:
        """
        List the backup ids of all existing log files, for both logs and catalogs.
        :return: log backup ids, catalog backup ids
        :raise ErrorCodeException:
        """
        logs, catalogs = self.list_log_files()
        log_ids = [self._get_backup_id_of_log_file(f) for f in logs]
        catalog_ids = [self._get_backup_id_of_log_file(f) for f in catalogs]
        logger.debug(f"List {len(log_ids)} log backup ids. {log_ids}")
        logger.debug(f"List {len(catalog_ids)} catalog backup ids. {catalog_ids}")
        return log_ids, catalog_ids

    def select_log_file_backup_ids(self) -> tuple[list[int], int]:
        """
        Select the log file backup ids eligible for backup: the latest catalog
        and every log that precedes it.
        :return: log backup ids, catalog backup id
        :raise ErrorCodeException: when nothing is eligible for backup.
        """
        log_ids, catalog_ids = self.list_log_file_backup_ids()
        if not log_ids or not catalog_ids:
            logger.error("No new log to backup.")
            raise ErrorCodeException(SAPHANAErrorCode.NO_NEW_LOG_BACKUP)
        catalog_id = max(catalog_ids)
        log_ids = sorted(i for i in log_ids if i < catalog_id)
        if not log_ids:
            logger.error("No new log to backup.")
            raise ErrorCodeException(SAPHANAErrorCode.NO_NEW_LOG_BACKUP)
        logger.info(f"Select {len(log_ids)} log backup ids. {catalog_id} {log_ids}")
        return log_ids, catalog_id

    def query_log_backup_id_records(self, min_id: int, max_id: int) -> list[int]:
        """
        Query all log backup id records in M_BACKUP_CATALOG_FILES between
        min_id and max_id, excluding topology and catalog entries.
        :raise ErrorCodeException:
        """
        logger.info(f"Query log backup id record between {min_id} and {max_id}.")
        # Backup ids are monotonically increasing.
        # A data backup id has both a volume and a topology record, so entries
        # with a topology record are excluded to keep only log backups.
        sql = (
            f"SELECT f.ENTRY_ID FROM M_BACKUP_CATALOG_FILES f JOIN M_BACKUP_CATALOG c ON f.ENTRY_ID = c.ENTRY_ID "
            f"WHERE f.ENTRY_ID BETWEEN {min_id} AND {max_id} AND f.SOURCE_TYPE_NAME = 'volume' AND f.ENTRY_ID NOT IN "
            f"(SELECT ENTRY_ID FROM M_BACKUP_CATALOG_FILES "
            f"WHERE ENTRY_ID BETWEEN {min_id} AND {max_id} AND SOURCE_TYPE_NAME = 'topology') "
            f"AND c.STATE_NAME = 'successful' "
            f"ORDER BY f.ENTRY_ID"
        )
        data = list(map(int, itertools.chain.from_iterable(self.execute_sql_command(sql, is_query=True))))
        logger.info(f"Got {len(data)} log backup id records between {min_id} and {max_id}. {data}")
        return data

    def merge_log_backup_ids(self, log_ids: list[int]) -> list[list[int]]:
        """
        Merge a list of log backup ids into consecutive [first, last] ranges,
        where "consecutive" means adjacent in the M_BACKUP_CATALOG_FILES records.
        :return: id_lines, e.g. [[1739588601431, 1739588615317], [1739588735350, 1739599221591]]
        :raise ErrorCodeException:
        """
        # Guard: the original crashed with IndexError on an empty list.
        if not log_ids:
            logger.warning("No log backup ids to merge.")
            return []
        ordered = sorted(log_ids)  # do not mutate the caller's list
        data = self.query_log_backup_id_records(ordered[0], ordered[-1])
        missing = list(set(data) - set(ordered))
        extra = list(set(ordered) - set(data))
        if missing:
            logger.warning(f"{len(missing)} log backup ids are missing. {missing}")
        if extra:
            logger.warning(f"{len(extra)} log backup ids are not found in M_BACKUP_CATALOG_FILES. {extra}")

        id_lines = []
        # Position of each recorded id in catalog order; two ids adjacent here
        # are consecutive even if their numeric gap is large.
        id_index = {_id: i for i, _id in enumerate(data)}
        for _id in ordered:
            # Ignore log ids that have no record in M_BACKUP_CATALOG_FILES.
            if _id not in id_index:
                continue
            if id_lines and id_index[id_lines[-1][-1]] + 1 == id_index[_id]:
                id_lines[-1][-1] = _id  # extend the current range
            else:
                id_lines.append([_id, _id])  # start a new range
        logger.info(f"Merged {len(log_ids)} log backup ids to {len(id_lines)} lines. {id_lines}")
        return id_lines

    def query_is_log_continuous(self, start_id: int, end_id: int) -> bool:
        """
        Check whether two backup ids are continuous with respect to logs.
        :raise ErrorCodeException:
        """
        # Ids further apart than log_backup_timeout_s are considered discontinuous.
        timeout = self.read_global_initialization("persistence", "log_backup_timeout_s")
        timeout = int(timeout) if timeout and timeout.isdigit() else 900
        logger.info(f"log_backup_timeout_s: {timeout}")
        # Backup ids are millisecond timestamps; allow 60s of slack.
        if (timeout + 60) * 1000 < end_id - start_id:
            logger.info(f"Backup ids are not continuous. Interval too long. {start_id} {end_id} {timeout}")
            return False

        # end before start counts as continuous.
        if end_id < start_id:
            logger.info(f"Backup ids are continuous. End id is lower than start id. {start_id} {end_id}")
            return True

        # Continuous when no other log backup id exists between the two.
        data = self.query_log_backup_id_records(start_id, end_id)
        missing = list(set(data) - {start_id, end_id})
        if missing:
            logger.warning(f"Backup ids are not continuous. Missing during logs. {data} {start_id} {end_id}")
        else:
            logger.info(f"Backup ids are continuous. No during logs. {start_id} {end_id}")
        return not missing

    def clean_log_backup_path(self, max_id: int) -> None:
        """
        Delete log and catalog files whose backup id is <= max_id.
        :raise ErrorCodeException:
        """
        log_backup_path = self.get_log_backup_path()
        logs, catalogs = self.list_log_files()
        # chain(logs, catalogs) preserves the original "logs first" ordering.
        paths = [os.path.join(log_backup_path, name)
                 for name in itertools.chain(logs, catalogs)
                 if self._get_backup_id_of_log_file(name) <= max_id]
        remove_paths(paths)

    @staticmethod
    def _get_backup_id_of_log_file(log_file: str) -> int:
        """Return the backup id encoded in the file extension; 0 when invalid."""
        _id = os.path.splitext(log_file)[1][1:]
        return int(_id) if _id.isdigit() else 0

    def get_log_files_by_backup_id(self, backup_ids: list[int]) -> list[str]:
        """
        Get the log file names matching the given backup ids.
        :raise ErrorCodeException:
        """
        wanted = set(backup_ids)  # O(1) membership instead of an O(n) list scan per file
        logs, catalogs = self.list_log_files()
        log_files = [name for name in itertools.chain(logs, catalogs)
                     if self._get_backup_id_of_log_file(name) in wanted]
        logger.debug(f"Got {len(log_files)} log files by {len(backup_ids)} backup ids. {backup_ids} {log_files}")
        return log_files

