#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import json
import os
import shutil
import socket
import stat
import threading
import time

import psutil

from common.common import execute_cmd
from common.common_models import CopyInfoRepModel, Copy, ReportCopyInfoModel
from common.const import SubJobStatusEnum, RepositoryDataTypeEnum, CMDResult, CopyDataTypeEnum, RpcToolInterface
from common.exception.common_exception import ErrCodeException
from common.util.backup import backup_files, query_progress
from generaldb.saphana.backup.saphana_backup_parent import SaphanaBackupParent
from generaldb.saphana.backup.saphana_parse_backup_params import SaphanaParseBackupParam, SaphanaCopyInfoParam
from generaldb.saphana.comm.common_util import log, get_value_from_str_lines
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from generaldb.saphana.comm.saphana_const import SaphanaConst, SaphanaDbActionType, SaphanaStateName, \
    SaphanaLogBackupStage, SaphanaErrorCode, SaphanaMetadataKey, SaphanaJsonConstant, SaphanaFileBackupRet, \
    LogFileCopyMax, CommitTimeType, SaphanaRpcParamKey
from generaldb.saphana.resources.saphana_cluster_manager import SaphanaClusterManage
from saphanay.backup.log_backup_saphana import LogBackupSAPHANA
from common.parse_parafile import get_env_variable
from saphanay.comm.error_code_exception import ErrorCodeException


class SaphanaLogBackup(SaphanaBackupParent):
    """SAP HANA log-backup subtask.

    Selects the latest contiguous run of log backup files produced by the
    database, verifies that the run chains to a previous log or full copy,
    copies the files plus the backup catalog into the backup repository,
    reports the copy information, and finally cleans up the source path.
    """

    def __init__(self, parse_params_obj: SaphanaParseBackupParam, pid):
        super().__init__(parse_params_obj, pid)
        # A single log backup may produce several log copies; this is the
        # earliest backup id of this run.
        self._backup_id = 0
        self._job_status = SubJobStatusEnum.RUNNING
        self._error_code = 0
        self._all_backup_id_list = set()  # every backup id covered by this log backup
        self._log_detail_param = []
        self._backup_files = set()
        self._transferred_size = 0
        self._total_backup_size = 0
        self.saphana = LogBackupSAPHANA(self._sid, self._backup_db_name, self._system_db_port,
                                        self._system_db_user, get_env_variable(self._backup_db_pwd_env))
        self._start_commit_time = 0
        self._end_commit_time = 0

    def execute_backup_subtask(self):
        """Run the whole log-backup subtask.

        Returns True on success; on failure sets ``_job_status`` /
        ``_error_code`` and returns False.
        """
        # Target directory inside the backup repository.
        data_path = self._link_path
        # Report progress from a background thread while the backup runs.
        progress_thread = threading.Thread(name="progress", target=self._upload_backup_progress)
        progress_thread.start()

        try:
            log_backup_path = os.path.dirname(self.saphana.get_log_backup_path())
            log_ids, _ = self.saphana.select_log_file_backup_ids()
            # Keep only the last contiguous run of log backup ids.
            log_id_line = self.saphana.merge_log_backup_ids(log_ids)[-1]
            self._backup_id = log_id_line[1]
            self._all_backup_id_list = self.saphana.query_log_backup_id_records(log_id_line[0], log_id_line[1])
            # Ids carry millisecond precision; convert the end of the run to
            # seconds. The start commit time comes from the previous copy
            # (see _is_log_continuous), so the run's own start is unused.
            self._end_commit_time = log_id_line[1] // 1000
        except ErrorCodeException as exc:
            log.error(f"Log backup failed. {exc}", exc_info=1)
            self._error_code = exc.error_code
            self._job_status = SubJobStatusEnum.FAILED
            return False

        # The run must chain to a previous log or full copy.
        if not self._is_log_continuous():
            self._error_code = SaphanaErrorCode.LOG_INCONSISTENT
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Copy the log backup files into the repository.
        if not self._copy_logbackup_files(log_backup_path, data_path):
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Back up the catalog.
        if not self._backup_catalog():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Report the generated copy information.
        if not self.report_copy_info():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Relax permissions so the copy can be WORM-protected.
        if not self._change_permission_after_backup():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Remove source log files that are now safely backed up.
        self.saphana.clean_log_backup_path(max(self._all_backup_id_list))
        self._job_status = SubJobStatusEnum.COMPLETED
        return True

    def _is_log_continuous(self) -> bool:
        """Return True when this run chains to the previous log or full copy.

        Side effect: on success, ``_start_commit_time`` is set from the
        timestamp of whichever previous copy the run chains to.
        """
        previous_log = self._query_previous_copy_info([CopyDataTypeEnum.LOG_COPY.value])
        previous_id = SaphanaCopyInfoParam.get_backup_id(previous_log)
        if not previous_id:
            log.info(f"No previous log copy.")
        if previous_id and self.saphana.query_is_log_continuous(previous_id, min(self._all_backup_id_list)):
            self._start_commit_time = int(SaphanaCopyInfoParam.get_backup_timestamp(previous_log))
            log.info(f"Log is continuous with previous log copy.")
            return True
        if previous_id:
            log.warning(f"Log is not continuous with previous log copy.")
        # Fall back to chaining against the previous full copy.
        previous_full = self._query_previous_copy_info([CopyDataTypeEnum.FULL_COPY.value])
        previous_id = SaphanaCopyInfoParam.get_backup_id(previous_full)
        if not previous_id:
            log.error(f"No previous full copy.")
            return False
        if self.saphana.query_is_log_continuous(previous_id, min(self._all_backup_id_list)):
            self._start_commit_time = int(SaphanaCopyInfoParam.get_backup_timestamp(previous_full))
            log.info(f"Log is continuous with previous full copy.")
            return True
        log.error(f"Log backup failed. Log is not continuous.")
        return False

    def _prepare_backup_storage(self):
        """Create the data/catalog/cache directories and hand ownership to
        the HANA instance user. Returns False when mandatory paths are missing.
        """
        mount_data_path = self._parse_params_obj.get_log_path()
        cache_path = self._parse_params_obj.get_cache_path()
        if not mount_data_path or not cache_path:
            log.error(f"No usable data path({mount_data_path}) or cache path({cache_path})")
            return False
        copy_id = self._parse_params_obj.get_copy_id()
        data_path = os.path.join(mount_data_path, "data")
        # exist_ok=True closes the race between the exists() check and the
        # directory creation (another node may create it concurrently).
        if not os.path.exists(data_path):
            os.makedirs(data_path, exist_ok=True)
            log.info(f"Create data path({data_path}) successfully.")
        catalog_path = os.path.join(mount_data_path, "catalog")
        if not os.path.exists(catalog_path):
            os.makedirs(catalog_path, exist_ok=True)
            log.info(f"Create catalog data path({catalog_path}) successfully.")
        if not os.path.exists(os.path.join(cache_path, copy_id)):
            os.makedirs(os.path.join(cache_path, copy_id), exist_ok=True)
        # Hand the directories to the HANA instance user/group so the
        # database can write into them.
        cluster_uid, cluster_gid = self._saphana_cluster.get_instance_user_group()
        log.info("Change attribute to cluster uid, gid")
        os.lchown(data_path, cluster_uid, cluster_gid)
        os.lchown(catalog_path, cluster_uid, cluster_gid)
        return True

    def _get_backup_progress(self):
        """No-op: progress is reported by the background thread instead."""
        return

    def _generate_copy_info(self):
        """Assemble the copy metadata and repository description to report."""
        metadata = {
            SaphanaMetadataKey.BACKUP_ID: self._backup_id,
            SaphanaMetadataKey.SAPHANA_VERSION: self._saphana_cluster.get_version(),
            SaphanaMetadataKey.MULTI_TENANT_SYSTEM: self._saphana_cluster.is_multi_system(),
            SaphanaMetadataKey.MASTER_SYSTEMDB_HOSTNAME: "",
            SaphanaMetadataKey.DB_INFO: self._saphana_cluster.get_db_nodes_and_services_list(self._backup_db_user,
                                                                                             self._backup_db_pwd_env),
            SaphanaMetadataKey.BACKUP_HOSTNAME: socket.gethostname(),
            SaphanaMetadataKey.LOG_BEGIN_TIME: self._start_commit_time,
            SaphanaMetadataKey.LOG_END_TIME: self._end_commit_time,
            SaphanaMetadataKey.LOG_DIR_NAME: "",
            SaphanaMetadataKey.ASSOCIATE_COPYIES: [],
            SaphanaMetadataKey.BACKUP_TIME: self._end_commit_time,
            SaphanaMetadataKey.COPY_ID: self._parse_params_obj.get_copy_id()
        }

        # Pick the log repository (last matching entry wins, as before).
        # NOTE(review): if no LOG_REPOSITORY entry exists, data_repo stays
        # None and the access below raises AttributeError — confirm that the
        # task parameters always carry a log repository.
        all_repo = self._parse_params_obj.get_all_repositories()
        data_repo = None
        for repository in all_repo:
            if repository.get(SaphanaJsonConstant.REPOSITORY_TYPE, "") == RepositoryDataTypeEnum.LOG_REPOSITORY:
                data_repo = repository
        # Repository description for the backed-up data directory.
        data_rep_rsp = [
            CopyInfoRepModel(
                id=data_repo.get(SaphanaJsonConstant.ID),
                repositoryType=data_repo.get(SaphanaJsonConstant.REPOSITORY_TYPE),
                isLocal=data_repo.get(SaphanaJsonConstant.IS_LOCAL),
                protocol="NFS",
                remotePath=f"{data_repo.get(SaphanaJsonConstant.REMOTE_PATH)}/"
                           f"{self._parse_params_obj.get_copy_id()}",
                remoteHost=data_repo.get(SaphanaJsonConstant.REMOTE_HOST),
                extendInfo={
                    "fsId": data_repo.get(SaphanaJsonConstant.EXTEND_INFO, {}).get("fsId")
                }
            ).dict(by_alias=True)
        ]

        copy_info = Copy(repositories=data_rep_rsp, extendInfo=metadata, timestamp=self._end_commit_time)
        return ReportCopyInfoModel(copy=copy_info, jobId=self._parse_params_obj.get_maintask_id()).dict(by_alias=True)

    def _saphana_backup_files(self, files, dir_path):
        """Start a file backup of *files* into *dir_path* and poll it to
        completion. Updates ``_transferred_size`` / ``_total_backup_size``.
        Returns True on success, False on start failure or backup failure.
        """
        interval = 0
        job_id = self._parse_params_obj.get_maintask_id()
        res = backup_files(job_id, files, dir_path)
        query_progress_interval = 2
        if not res:
            log.error(f"Failed to start backup. jobId: {job_id}.")
            return False
        while True:
            interval += 1
            time.sleep(query_progress_interval)
            status, progress, data_size = query_progress(job_id)
            # Reported size may fluctuate; keep the high-water mark.
            self._transferred_size = max(self._transferred_size, data_size)
            if status == SaphanaFileBackupRet.SUCCESS:
                log.info(f"Backup completed, jobId: {job_id}.")
                self._total_backup_size = self._transferred_size * 1024
                return True
            if status == SaphanaFileBackupRet.FAILED:
                log.error(f"Backup failed, jobId: {job_id}.")
                return False
            # Throttle the "in progress" log to every 10 polls (~20s).
            if interval % 10 == 0:
                log.debug(f"Backing up, progress: {progress}, dataSize: {self._transferred_size}, jobId: {job_id}.")

    def _copy_logbackup_files(self, log_backup_path, target_path):
        """Copy the selected log backup files into the repository.

        Only regular ``log_backup.<id>`` files whose id belongs to
        ``_all_backup_id_list`` are copied, in chunks of at most
        ``LogFileCopyMax.MAX_LEN_EACH_BACKUP`` files per backup job.
        """
        # The SYSTEMDB of a multi-tenant system keeps its logs in a
        # directory named after the DB; tenant DBs use a "DB_" prefix.
        if self._saphana_cluster.is_multi_system() and self._saphana_cluster.is_system_db():
            target_db_logpath = os.path.join(log_backup_path, f"{self._backup_db_name.upper()}")
        else:
            target_db_logpath = os.path.join(log_backup_path, f"DB_{self._backup_db_name.upper()}")
        log.debug(f"Copy {target_db_logpath} to {target_path}")
        if not os.path.exists(target_db_logpath):
            log.error(f"Log backup path: {target_db_logpath} not exists.")
            self._error_code = SaphanaErrorCode.LOG_BACKUP_PATH_NOT_EXIST
            self._log_detail_param = [target_db_logpath]
            return False
        backuped_list = set()
        file_list = os.listdir(target_db_logpath)
        log.debug(f"All backup id: {self._all_backup_id_list}")
        # Build the list of files to back up.
        for each_file in file_list:
            file_complete_path = os.path.join(target_db_logpath, each_file)
            # Symlinks are skipped for safety.
            if os.path.islink(file_complete_path):
                log.warning(f"File {each_file} is a link file")
                continue
            temp_file_name = each_file.split(".")
            if not each_file.startswith("log_backup") or len(temp_file_name) != 2:
                log.warning(f"File {each_file} is not a log backup file.")
                continue
            if int(temp_file_name[1]) in self._all_backup_id_list:
                backuped_list.add(file_complete_path)
        if not backuped_list:
            log.error("No backup file.")
            self._error_code = SaphanaErrorCode.LOG_BACKUP_FILE_NOT_EXIST
            self._log_detail_param = list(self._all_backup_id_list)
            return False
        log.debug(f"Backup list: {backuped_list}")
        # Back up the files in bounded chunks.
        tmp_backup_list = list(backuped_list)
        chunk_size = LogFileCopyMax.MAX_LEN_EACH_BACKUP
        for i in range(0, len(tmp_backup_list), chunk_size):
            chunk = tmp_backup_list[i: i + chunk_size]
            if not self._saphana_backup_files(chunk, target_path):
                log.error(f"Copy file {chunk} failed.")
                self._error_code = SaphanaErrorCode.BACKUP_LOG_FILE_FAIL
                return False
        self._backup_files = backuped_list
        log.info("Copy all log backup files successfully.")
        return True

    def _exec_abort_backup(self):
        """Abort hook: nothing to roll back for a log backup."""
        return True

    def _change_permission_after_backup(self):
        """After the backup, relax copy permissions to 755 to support WORM.

        Returns False when the log path is missing or chmod fails.
        """
        mount_data_path = self._parse_params_obj.get_log_path()
        if not mount_data_path:
            log.error(f"No usable data path({mount_data_path})")
            return False
        # NOTE(review): the path is interpolated into a shell command; it is
        # repository-controlled here, but an argument-list invocation would
        # be safer — confirm execute_cmd supports it.
        ret, _, err = execute_cmd(f"chmod 755 -R {mount_data_path}")
        if ret != CMDResult.SUCCESS:
            log.error(f"Change mod failed for {mount_data_path} for {err}")
            return False
        return True