#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import shutil
import socket
import stat
import threading
import time
from typing import Optional
from pathlib import Path
import shutil

from common.common import execute_cmd
from common.common_models import CopyInfoRepModel, Copy, ReportCopyInfoModel
from common.const import SubJobStatusEnum, RepositoryDataTypeEnum, CMDResult, CopyDataTypeEnum, RpcToolInterface
from common.exception.common_exception import ErrCodeException
from common.util.backup import backup_files, query_progress, backup_dirs
from generaldb.saphana.backup.saphana_parse_backup_params import SaphanaCopyInfoParam
from generaldb.saphana.comm.common_util import log
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from generaldb.saphana.comm.saphana_const import SaphanaConst, SaphanaDbActionType, SaphanaStateName, \
    SaphanaLogBackupStage, SaphanaErrorCode, SaphanaMetadataKey, SaphanaJsonConstant, SaphanaFileBackupRet, \
    LogFileCopyMax, CommitTimeType, SaphanaRpcParamKey, SapnanBackupType
from saphana.resource.saphana_cluster_manager import SaphanaClusterManage
from saphana.backup.saphana_backup_parent import SaphanaBackupParent
from saphana.backup.saphana_parse_backup_params import SaphanaParseBackupParam


class SaphanaLogBackup(SaphanaBackupParent):
    """Log backup sub job for SAP HANA.

    Collects the log backup copies the database has produced since the
    previous backup, copies them into the log repository, backs up the
    backup catalog, reports the copy information and finally cleans up
    the database-side log backup path.
    """

    def __init__(self, parse_params_obj: SaphanaParseBackupParam, pid, job_id, subjob_id):
        super().__init__(parse_params_obj, pid, job_id, subjob_id)
        # All backup ids produced by this log backup run.
        self._all_backup_id_list = set()
        # Absolute paths of the log backup files copied to the repository.
        self._backup_files = set()
        # Commit-time range of this log copy (epoch timestamps).
        self._start_commit_time = 0
        self._end_commit_time = 0
        # Last log position info, reported in the copy metadata.
        self._last_log_position = {}

    def execute_backup_subtask(self):
        """Run the log backup sub job.

        Steps: start a progress-report thread, resolve the database log
        backup path, find log copies produced since the previous backup,
        compute the copy's commit-time range, copy the log files and the
        backup catalog to the repository, report the copy info, fix
        permissions and clean up.

        Returns:
            bool: True on success. On failure ``self._job_status`` (and,
            where known, ``self._error_code``) is set and False is returned.
        """
        # Target directory inside the log repository for the copied log files.
        data_path = os.path.join(self._parse_params_obj.get_log_path(), "data")
        # 1. Start a thread that keeps reporting the backup progress.
        progress_thread = threading.Thread(name="progress", target=self._upload_backup_progress)
        progress_thread.start()
        # 2. Resolve the database's own log backup path.
        log_backup_path = self._get_log_backup_path()
        if not log_backup_path:
            log.error("No log backup path.")
            self._error_code = SaphanaErrorCode.GET_LOG_BACKUP_PATH_FAIL
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # 3. Check whether new log copies were produced since the last backup.
        self._previous_backup_id = self._get_previous_backup_id()
        log.info(f"Previous backup id: {self._previous_backup_id}")
        if not self._get_log_backup_ids():
            log.error("Get log backup copy failed.")
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # 4. Compute the start/end commit times for the copy timeline.
        ret, self._start_commit_time, self._end_commit_time = self._calculate_log_start_end_time()
        log.info(f"Start commit time: {self._start_commit_time}, end commit time: {self._end_commit_time}")
        if not ret:
            self._job_status = SubJobStatusEnum.FAILED
            self._error_code = SaphanaErrorCode.CALCULATE_LOG_TIME_FAIL
            return False
        # 5. Copy the new log backup files into the repository.
        if not self._copy_log_backup_files(log_backup_path, data_path):
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # 6. Back up the catalog files.
        if not self._backup_catalog():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # 7. Generate and report the copy information.
        if not self.report_copy_info():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # 8. Adjust permissions (WORM support).
        if not self._change_permission_after_backup():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        log.info(f"Remove log files: {self._backup_files}")
        self.clean_log_backup_path(log_backup_path, max(self._all_backup_id_list))
        self._job_status = SubJobStatusEnum.COMPLETED
        # First log backup after a restore: remove the restore flag file.
        flag_path = os.path.join(
            os.path.dirname(self._parse_params_obj.get_cache_path()),
            f"{self._saphana_cluster.db_name_prefix()}{self._backup_db_name}_RESTORED")
        if os.path.exists(flag_path):
            os.remove(flag_path)
            log.info(f"Remove restore flag file: {flag_path}")
        return True

    def clean_log_backup_path(self, log_backup_path: str, max_id: int) -> None:
        """Delete already-backed-up entries from the database log backup path.

        Removes every regular file whose numeric suffix, and every directory
        whose numeric name, is strictly smaller than ``max_id`` (the highest
        backup id copied in this run).

        Args:
            log_backup_path: database-side log backup directory.
            max_id: highest backup id handled by this run; entries with a
                smaller id are deleted.
        """
        files = [str(f.absolute())
                 for f in Path(log_backup_path).iterdir()
                 if f.is_file() and f.suffix[1:].isdigit() and int(f.suffix[1:]) < max_id]
        log.warning(f"Clean log backup path. {files}")
        for file_path in files:
            os.remove(file_path)
        _dirs = [str(d.absolute())
                 for d in Path(log_backup_path).iterdir()
                 if d.is_dir() and d.name.isdigit() and int(d.name) < max_id]
        log.warning(f"Clean log backup path. {_dirs}")
        for dir_path in _dirs:
            shutil.rmtree(dir_path, ignore_errors=True)

    def query_output(self, query_cmd, query_data_count):
        """Execute a catalog query and return its raw output lines.

        Args:
            query_cmd: SQL statement to run against the backup catalog.
            query_data_count: exact number of data rows expected.

        Returns:
            list[str]: the split output lines, or [] when the command fails
            or the line count does not match ``self._min_lines +
            query_data_count`` exactly.
        """
        log.info(f"Query cmd: {query_cmd}")
        ret, output = self._exec_db_cmd(self._backup_db_name,
                                        self._backup_db_user,
                                        self._backup_db_pwd_env,
                                        query_cmd,
                                        SaphanaDbActionType.QUERY_CMD)
        if not ret:
            log.warning(f"Exec {query_cmd} failed.")
            return []
        output = output.split("\n")
        log.info(f"Query output: {output}")
        if len(output) != self._min_lines + query_data_count:
            log.warning(f"Get log copy failed. {output}")
            return []
        return output

    def _calculate_log_start_end_time(self):
        """Compute the commit-time range reported for this log copy.

        The previous log copy's time is used as the start time so the
        timeline stays continuous; for the first log backup the last full
        copy's time is used instead.

        Returns:
            tuple: (True/False, start_commit_time, end_commit_time)
        """
        query_cmd = "SELECT ENTRY_TYPE_NAME, UTC_START_TIME from M_BACKUP_CATALOG " \
                    f"WHERE BACKUP_ID in ({self._previous_backup_id}, {self._backup_id})"
        query_data_count = 2
        output = self.query_output(query_cmd, query_data_count)
        if not output:
            return False, 0, 0
        # Row layout: output[2] is the previous copy, output[3] this copy.
        end_commit_time = SaphanaBackupParent.utc_to_timestamp(output[3].split(",")[1])
        start_info = output[2].split(",")
        if SaphanaClusterManage.erase_tail(start_info[0]) == SapnanBackupType.LOG_BACKUP:
            start_commit_time = SaphanaBackupParent.utc_to_timestamp(start_info[1])
            log.info("Select last log copy as begin time.")
        else:
            # First log backup: fall back to the newest full backup's time.
            query_cmd = "SELECT TOP 1 UTC_START_TIME from M_BACKUP_CATALOG " \
                        f"WHERE ENTRY_TYPE_NAME='{SapnanBackupType.FULL_BACKUP}' order by BACKUP_ID DESC"
            query_data_count = 1
            output = self.query_output(query_cmd, query_data_count)
            if not output:
                return False, 0, 0
            start_commit_time = SaphanaBackupParent.utc_to_timestamp(output[2])
            log.info("Select last full data copy as begin time.")
        return True, start_commit_time, end_commit_time

    def _prepare_backup_storage(self):
        """Prepare the backup storage layout.

        Creates the ``data``/``catalog`` directories and the per-copy cache
        directory, then hands directory ownership to the HANA instance user.

        Returns:
            bool: False when the mount or cache path is missing.
        """
        mount_data_path = self._parse_params_obj.get_log_path()
        cache_path = self._parse_params_obj.get_cache_path()
        if not mount_data_path or not cache_path:
            log.error(f"No usable data path({mount_data_path}) or cache path({cache_path})")
            return False
        copy_id = self._parse_params_obj.get_copy_id()
        data_path = os.path.join(mount_data_path, "data")
        if not os.path.exists(data_path):
            os.makedirs(data_path)
            log.info(f"Create data path({data_path}) successfully.")
        catalog_path = os.path.join(mount_data_path, "catalog")
        if not os.path.exists(catalog_path):
            os.makedirs(catalog_path)
            log.info(f"Create catalog data path({catalog_path}) successfully.")
        if not os.path.exists(os.path.join(cache_path, copy_id)):
            os.makedirs(os.path.join(cache_path, copy_id))
        # Hand the directories over to the HANA instance user/group so the
        # database can write log backups into them.
        cluster_uid, cluster_gid = self._saphana_cluster.get_instance_user_group()
        log.info("Change attribute to cluster uid, gid")
        os.lchown(data_path, cluster_uid, cluster_gid)
        os.lchown(catalog_path, cluster_uid, cluster_gid)
        return True

    def _get_backup_progress(self):
        # Intentional no-op: log backup progress is tracked through
        # _saphana_backup_files / _upload_backup_progress instead.
        return

    def _generate_copy_info(self):
        """Build the copy-info payload reported to the backup framework.

        Returns:
            dict: ReportCopyInfoModel serialized with field aliases.
        """
        metadata = {
            SaphanaMetadataKey.BACKUP_ID: self._backup_id,
            SaphanaMetadataKey.SAPHANA_VERSION: self._saphana_cluster.get_version(),
            SaphanaMetadataKey.MULTI_TENANT_SYSTEM: self._saphana_cluster.is_multi_system(),
            SaphanaMetadataKey.MASTER_SYSTEMDB_HOSTNAME: "",
            SaphanaMetadataKey.DB_INFO: self._saphana_cluster.get_db_nodes_and_services_list(self._backup_db_user,
                                                                                             self._backup_db_pwd_env),
            SaphanaMetadataKey.BACKUP_HOSTNAME: socket.gethostname(),
            SaphanaMetadataKey.LOG_BEGIN_TIME: self._start_commit_time,
            SaphanaMetadataKey.LOG_END_TIME: self._end_commit_time,
            SaphanaMetadataKey.LOG_DIR_NAME: "",
            SaphanaMetadataKey.ASSOCIATE_COPYIES: [],
            SaphanaMetadataKey.BACKUP_TIME: self._end_commit_time,
            SaphanaMetadataKey.COPY_ID: self._parse_params_obj.get_copy_id(),
            SaphanaMetadataKey.BACKUP_LOG_POSTION: self._last_log_position
        }

        all_repo = self._parse_params_obj.get_all_repositories()
        data_repo = None
        # Keep the LAST matching log repository, as the original scan did.
        # NOTE(review): if no log repository is present, data_repo stays None
        # and the access below raises — presumably the framework guarantees
        # one exists; confirm against callers.
        for repository in all_repo:
            if repository.get(SaphanaJsonConstant.REPOSITORY_TYPE, "") == RepositoryDataTypeEnum.LOG_REPOSITORY:
                data_repo = repository
        # Repository entry describing where this copy's data lives.
        data_rep_rsp = [
            CopyInfoRepModel(id=data_repo.get(SaphanaJsonConstant.ID),
                             repositoryType=data_repo.get(SaphanaJsonConstant.REPOSITORY_TYPE),
                             isLocal=data_repo.get(SaphanaJsonConstant.IS_LOCAL),
                             protocol="NFS",
                             remotePath=f"{data_repo.get(SaphanaJsonConstant.REMOTE_PATH)}/"
                                        f"{self._parse_params_obj.get_copy_id()}",
                             remoteHost=data_repo.get(SaphanaJsonConstant.REMOTE_HOST),
                             extendInfo={
                                 "fsId": data_repo.get(SaphanaJsonConstant.EXTEND_INFO, {}).get("fsId")
                             }).dict(by_alias=True)
        ]

        copy_info = Copy(repositories=data_rep_rsp, extendInfo=metadata, timestamp=self._end_commit_time)
        return ReportCopyInfoModel(copy=copy_info, jobId=self._parse_params_obj.get_maintask_id()).dict(by_alias=True)

    def _sql_data(self, cmd: str) -> Optional[list[list[str]]]:
        """Run a SQL command and return its data rows as cleaned cell lists.

        Args:
            cmd: SQL statement to execute.

        Returns:
            Parsed rows (header lines skipped), or None when the command
            fails or produces fewer than ``self._min_lines`` lines.
        """
        ret, output = self._exec_db_cmd(self._backup_db_name, self._backup_db_user, self._backup_db_pwd_env, cmd)
        if not ret:
            return None
        output = output.split("\n")
        if len(output) < self._min_lines:
            log.error("The number of output lines is less than 3.")
            return None
        # Skip the two header lines; strip trailing decoration from each cell.
        return [[SaphanaClusterManage.erase_tail(val) for val in line.split(",")]
                for line in output[2:] if line]

    def _get_log_backup_ids(self):
        """Collect the ids of log copies created since the previous backup.

        A single run may have produced several log copies; every successful
        one is recorded in ``self._all_backup_id_list`` and the largest id
        becomes ``self._backup_id``.

        Returns:
            bool: False when the query fails or no successful copy exists.
        """
        query_cmd = "SELECT M_BACKUP_CATALOG.backup_id,M_BACKUP_CATALOG.state_name FROM M_BACKUP_CATALOG inner join " \
                    "M_BACKUP_CATALOG_FILES on M_BACKUP_CATALOG.backup_id=M_BACKUP_CATALOG_FILES.backup_id where " \
                    "M_BACKUP_CATALOG.entry_type_name='log backup' and " \
                    "M_BACKUP_CATALOG_FILES.source_type_name='volume'" \
                    f" and M_BACKUP_CATALOG.backup_id>{self._previous_backup_id} order by M_BACKUP_CATALOG.entry_id asc"
        ret, output = self._exec_db_cmd(self._backup_db_name, self._backup_db_user, self._backup_db_pwd_env, query_cmd)
        if not ret:
            return False
        output = output.split("\n")
        if len(output) <= self._min_lines:
            log.error("No log backup generate.")
            self._error_code = SaphanaErrorCode.NO_NEW_LOG_BACKUP
            return False
        for each_record in output[2:]:
            values = each_record.split(',')
            if len(values) != 2:
                continue
            state_name = SaphanaClusterManage.erase_tail(values[1])
            if SaphanaStateName.SUCCESS not in state_name:
                # Failed copy: skip it.
                continue
            temp_backup_id = int(SaphanaClusterManage.erase_tail(values[0]))
            self._backup_id = max(self._backup_id, temp_backup_id)
            self._all_backup_id_list.add(temp_backup_id)
        if not self._all_backup_id_list:
            # Not a single successful log copy was found.
            self._error_code = SaphanaErrorCode.NO_NEW_LOG_BACKUP
            log.error("Log backup failed.")
            return False
        log.info(f"Log backup successfully. Backup id : {self._backup_id}")
        return True

    def _get_previous_backup_id(self) -> int:
        """Return the backup id of the previous copy (0 when unknown).

        After a restore (flag file present) the last FULL copy is used;
        otherwise the last LOG copy, falling back to the last FULL copy for
        the very first log backup.
        """
        flag_path = os.path.join(
            os.path.dirname(self._parse_params_obj.get_cache_path()),
            f"{self._saphana_cluster.db_name_prefix()}{self._backup_db_name}_RESTORED")
        if os.path.exists(flag_path):
            log.info(f"The database has been restored, flag file exist: {flag_path}.")
            previous_copy_info = self._query_previous_copy_info([CopyDataTypeEnum.FULL_COPY.value])
        else:
            previous_copy_info = self._query_previous_copy_info([CopyDataTypeEnum.LOG_COPY.value])
            if not previous_copy_info:
                log.info("This is the first log backup.")
                previous_copy_info = self._query_previous_copy_info([CopyDataTypeEnum.FULL_COPY.value])
        if not previous_copy_info:
            log.error("Fail to get previous backup info.")
            return 0
        return SaphanaCopyInfoParam.get_backup_id(previous_copy_info)

    def _saphana_backup_files(self, files, dir_path):
        """Back up ``files`` into ``dir_path`` and poll until completion.

        Args:
            files: list of absolute paths (files or directories) to back up.
            dir_path: repository target directory.

        Returns:
            bool: True when the backup task finishes successfully.
        """
        interval = 0
        job_id = self._parse_params_obj.get_maintask_id()
        res = backup_dirs(job_id, files, dir_path)
        query_progress_interval = 2
        if not res:
            log.error(f"Failed to start backup. jobId: {job_id}.")
            return False
        while True:
            interval += 1
            time.sleep(query_progress_interval)
            status, progress, data_size = query_progress(job_id)
            # Track the high-water mark of transferred data.
            self._transferred_size = max(self._transferred_size, data_size)
            if status == SaphanaFileBackupRet.SUCCESS:
                log.info(f"Backup completed, jobId: {job_id}.")
                self._total_backup_size = self._transferred_size * 1024
                return True
            if status == SaphanaFileBackupRet.FAILED:
                log.error(f"Backup failed, jobId: {job_id}.")
                return False
            # Log progress roughly every 20 seconds (10 polls x 2s).
            if interval % 10 == 0:
                log.info(f"Backing up, status {status}, progress: {progress}, "
                         f"dataSize: {self._transferred_size}, jobId: {job_id}.")

    def _copy_log_backup_files(self, log_backup_path, target_path):
        """Copy this run's log backup directories into the repository.

        Args:
            log_backup_path: database-side log backup directory.
            target_path: repository ``data`` directory.

        Returns:
            bool: False (with ``self._error_code`` set) on any failure.
        """
        # NOTE: original used log.info("Copy {%s} to {%s}", ...), which mixes
        # brace and %-style placeholders; use the file's f-string convention.
        log.info(f"Copy {log_backup_path} to {target_path}")
        if not os.path.exists(log_backup_path):
            log.error(f"Log backup path: {log_backup_path} not exists.")
            self._error_code = SaphanaErrorCode.LOG_BACKUP_PATH_NOT_EXIST
            self._log_detail_param = [log_backup_path]
            return False
        backuped_list = set()
        file_list = os.listdir(log_backup_path)
        log.info(f"All backup id: {self._all_backup_id_list}")
        # Select the entries (named by backup id) that belong to this run,
        # skipping symlinks and non-numeric names.
        for each_backup in file_list:
            backup_complete_path = os.path.join(log_backup_path, each_backup)
            if os.path.islink(backup_complete_path):
                log.warning(f"File {each_backup} is a link file")
                continue
            if not each_backup.isdigit():
                continue
            if int(each_backup) not in self._all_backup_id_list:
                continue
            backuped_list.add(backup_complete_path)
        if not backuped_list:
            log.error("No backup file.")
            self._error_code = SaphanaErrorCode.LOG_BACKUP_FILE_NOT_EXIST
            self._log_detail_param = list(self._all_backup_id_list)
            return False
        log.info(f"Backup list: {backuped_list}")
        # Back up the files in batches to cap each task's size.
        tmp_backup_list = list(backuped_list)
        for i in range(0, len(backuped_list), LogFileCopyMax.MAX_LEN_EACH_BACKUP):
            if not self._saphana_backup_files(tmp_backup_list[i: i + LogFileCopyMax.MAX_LEN_EACH_BACKUP], target_path):
                log.error(f"Copy file {tmp_backup_list} failed.")
                self._error_code = SaphanaErrorCode.BACKUP_LOG_FILE_FAIL
                return False
            # Log backup is asynchronous: leave time after each batch for the
            # scanner directory to be deleted, so the next batch's directory
            # is not removed by the previous one and the task does not hang.
            time.sleep(5)
            log.info("Reserve time for deleting scanner_path during each backup.")
        self._backup_files = backuped_list
        log.info("Copy all log backup files successfully.")
        return True

    def _exec_abort_backup(self):
        # Nothing to abort for log backup; report success.
        return True

    def _change_permission_after_backup(self):
        """Chmod the repository data to 755 after backup (WORM support)."""
        mount_data_path = self._parse_params_obj.get_log_path()
        if not mount_data_path:
            log.error(f"No usable data path({mount_data_path})")
            return False
        # POSIX argument order (mode before -R was a GNU-only extension).
        ret, _, err = execute_cmd(f"chmod -R 755 {mount_data_path}")
        if ret != CMDResult.SUCCESS:
            log.error(f"Change mod failed for {mount_data_path} for {err}")
            return False
        return True

    def _backup_catalog(self):
        """Copy the catalog files of this run's backup into the repository.

        Each file from the database catalog directory is copied to the
        repository ``catalog`` directory with the backup id appended as a
        suffix.

        Returns:
            bool: False when the catalog directory is missing or a copy fails.
        """
        catalog_backup_id = str(self._get_catalog_id())
        log.info(f"catalog_backup_id: {catalog_backup_id}")
        catalog_backuped_path = os.path.join(self._get_log_backup_path(), catalog_backup_id)
        if not os.path.exists(catalog_backuped_path):
            log.error("Fail to backup catalog %s", catalog_backuped_path)
            return False
        catalog_path = os.path.join(self._parse_params_obj.get_log_path(), "catalog")
        for each_file in os.listdir(catalog_backuped_path):
            ret, output = CommonFuction.exec_shell_cmd(f"cp {os.path.join(catalog_backuped_path, each_file)} "
                                                       f"{catalog_path}/{each_file}.{catalog_backup_id}",
                                                       need_verify=False)
            if not ret:
                log.error("Fail to backup catalog for %s", output)
                return False
        log.info(f"Backup catalog {catalog_backuped_path} to {catalog_path} successfully.")
        return True