#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import os
import shutil
import socket
import threading
import time

from common.cleaner import clear
from common.common_models import Copy, CopyInfoRepModel, ReportCopyInfoModel
from common.const import BackupTypeEnum, SubJobStatusEnum, RepositoryDataTypeEnum
from common.exception.common_exception import ErrCodeException
from common.security.anonym_utils.anonymity import Anonymity
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from saphana.backup.saphana_backup_parent import SaphanaBackupParent
from saphana.backup.saphana_parse_backup_params import SaphanaParseBackupParam
from generaldb.saphana.comm.common_util import log
from generaldb.saphana.comm.saphana_const import SaphanaStateName, SaphanaMetadataKey, SaphanaJsonConstant, \
    SaphanaDbActionType, SaphanaErrorCode, SaphanaConst
from saphana.resource.saphana_cluster_manager import SaphanaClusterManage


class SaphanaDataBackup(SaphanaBackupParent):
    """SAP HANA data backup sub-job.

    Drives a full/differential/incremental data backup through the backint
    agent, tracks progress via the M_BACKUP_PROGRESS system view, copies the
    backup catalog next to the data copy, and reports copy (replica)
    metadata. All shared job state (cluster handle, credentials, job ids,
    progress counters such as self._min_lines and self._previous_backup_id)
    lives on SaphanaBackupParent.
    """

    def __init__(self, parse_params_obj: SaphanaParseBackupParam, pid, job_id, sub_job_id):
        super().__init__(parse_params_obj, pid, job_id, sub_job_id)

    def execute_backup_subtask(self):
        """Run the backup sub-job end to end.

        Returns:
            bool: True on success. On failure self._job_status,
            self._error_code and self._log_detail_param are filled in and
            False is returned.
        """
        # 1. Build the backup SQL: FULL uses no type keyword while DIFF/INCR
        #    add "differential"/"incremental"; data flows through backint.
        backup_type_map = {BackupTypeEnum.FULL_BACKUP: "", BackupTypeEnum.DIFF_BACKUP: "differential",
                           BackupTypeEnum.INCRE_BACKUP: "incremental"}
        back_cmd = f"backup data {backup_type_map.get(self._backup_type)} using backint('');"
        log.info(f"Backup cmd : {back_cmd}")
        # 2. Launch a reporter thread that uploads progress while the
        #    (blocking) backup SQL runs.
        #    NOTE(review): the thread is never joined here — presumably
        #    _upload_backup_progress exits when the job finishes; confirm.
        self.backup_start_time = time.time()
        progress_thread = threading.Thread(name="progress", target=self._upload_backup_progress)
        progress_thread.start()
        # Record the newest pre-existing backup id so that progress parsing
        # can distinguish this job's records from older catalog entries.
        self._get_last_backup_id()
        # 3. Execute the backup SQL; this call blocks until HANA finishes.
        try:
            ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                    self._backup_db_pwd_env, back_cmd,
                                                                    SaphanaDbActionType.PROTECT_CMD)
        except ErrCodeException as err:
            # Fix: the original stored err.error_code and immediately
            # overwrote it two lines later; keep only the effective value.
            self._job_status = SubJobStatusEnum.FAILED
            self._error_code = SaphanaErrorCode.EXEC_BACKUP_RECOVER_CMD_FAIL
            self._log_detail_param = ["backup", err.error_message]
            log.error(f"Execute backup failed for {err.error_message}")
            return False
        except Exception as unknown_err:
            log.error(f"Execute backup failed for {unknown_err}")
            self._job_status = SubJobStatusEnum.FAILED
            self._error_code = SaphanaErrorCode.EXEC_BACKUP_RECOVER_CMD_FAIL
            self._log_detail_param = ["backup", str(unknown_err)]
            return False
        log.info(f"Exec backup cmd ret: {ret}, output: {output}")
        self.backup_end_time = time.time()
        if not ret:
            self._job_status = SubJobStatusEnum.FAILED
            self._error_code = SaphanaErrorCode.EXEC_BACKUP_RECOVER_CMD_FAIL
            self._log_detail_param = ["backup", Anonymity.process(output)] if output else ["backup", "unknown"]
            # Fix: guard the substring test — output may be empty/None here
            # (the line above already handles that case for the log params).
            if output and "Allocation failed" in output:
                self._error_code = SaphanaErrorCode.AGENT_INSUFFICIENT_MEMORY
            log.error(f"Backup failed for {Anonymity.process(output)}")
            return False
        # Refresh self._backup_id (and transferred sizes) from the database.
        self._get_backup_progress()
        log.info(f"Backup id: {self._backup_id}.")
        if not self._backup_id:
            log.error("The backup didn't generate backup id.")
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Copy the backup catalog files next to the data copy.
        if not self._backup_catalog():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        # Generate and report the copy (replica) information.
        if not self.report_copy_info():
            self._job_status = SubJobStatusEnum.FAILED
            return False
        self._job_status = SubJobStatusEnum.COMPLETED
        return True

    def _backup_catalog(self):
        """Copy the catalog files produced for this backup into the copy's
        data directory on the repository.

        Returns:
            bool: True when the catalog already exists in the data path or
            was copied successfully, False otherwise.
        """
        catalog_backup_id = str(self._get_catalog_id())
        mount_data_path = self._parse_params_obj.get_data_path()
        copy_id = self._parse_params_obj.get_copy_id()
        catalog_data_path = os.path.join(mount_data_path, copy_id, catalog_backup_id)
        if os.path.exists(catalog_data_path):
            # backint already placed the catalog under the copy directory.
            log.info("Catalog %s is backuped by backint", catalog_backup_id)
            return True
        catalog_backint_path = os.path.join(self._get_log_backup_path(), catalog_backup_id)
        if not os.path.exists(catalog_backint_path):
            log.error("Fail to backup catalog %s", catalog_backup_id)
            return False
        cmd = f"cp -r {catalog_backint_path}/ {os.path.join(mount_data_path, copy_id)}"
        ret, output = CommonFuction.exec_shell_cmd(cmd, need_verify=False)
        if not ret:
            log.error("Fail to backup catalog for %s", output)
            return False
        # Fix: dropped the pointless f-prefix on a constant message.
        log.info("Backup catalog successfully.")
        return True

    def _prepare_backup_storage(self):
        """Create the copy directory on the data repository and hand
        ownership of the data/cache paths to the HANA instance user.

        Returns:
            bool: True on success, False when a required mount path is
            missing.
        """
        mount_data_path = self._parse_params_obj.get_data_path()
        cache_path = self._parse_params_obj.get_cache_path()
        if not mount_data_path or not cache_path:
            log.error(f"No usable data path({mount_data_path}) or cache path({cache_path})")
            return False
        copy_id = self._parse_params_obj.get_copy_id()
        data_path = os.path.join(mount_data_path, copy_id)
        if not os.path.exists(data_path):
            # exist_ok avoids a race with a concurrently created directory.
            os.makedirs(data_path, exist_ok=True)
            log.info(f"Create data path({data_path}) successfully.")
        # Hand the directories over to the HANA instance user/group so
        # backint (running as that user) can write into them.
        cluster_uid, cluster_gid = self._saphana_cluster.get_instance_user_group()
        log.info("Change attribute to cluster uid, gid")
        # Fix: the original chowned os.path.join(mount_data_path, copy_id)
        # and data_path, which are the same path — chown it once.
        os.lchown(data_path, cluster_uid, cluster_gid)
        os.lchown(cache_path, cluster_uid, cluster_gid)
        return True

    def _get_backup_progress(self):
        """Read real-time backup progress from M_BACKUP_PROGRESS.

        When the tenant database is distributed over several nodes one
        backup produces one record per node; sizes are summed over all
        records. Updates self._backup_id, self._total_backup_size,
        self._transferred_size and self.progress in place.
        """
        query_cmd = "select * from M_BACKUP_PROGRESS;"
        ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                self._backup_db_pwd_env, query_cmd)
        # Fix: bail out instead of parsing error text (or crashing on None)
        # when the query failed.
        if not ret or not output:
            log.warning(f"Fail to query backup progress: {output}")
            return
        # Skip the echoed command line and the column-header line.
        output = output.split("\n")[2:]
        result_list = []  # state names of all records belonging to this backup
        attribute_num = 12  # number of columns expected per record
        tmp_total_backup_size = 0
        tmp_transferred_size = 0
        for record in output:
            value_list = record.split(',')
            if len(value_list) < attribute_num:
                log.warning(f"Record({record}) is not legal value.")
                continue
            self._backup_id = int(SaphanaClusterManage.erase_tail(value_list[0]))
            if self._backup_id <= self._previous_backup_id:
                # Record predates this job; nothing newer to account for.
                log.warning(f"This is not the latest backup id: {self._backup_id}")
                return
            tmp_total_backup_size += int(SaphanaClusterManage.erase_tail(value_list[10]))
            tmp_transferred_size += int(SaphanaClusterManage.erase_tail(value_list[11]))
            result_list.append(SaphanaClusterManage.erase_tail(value_list[9]).replace(' ', ''))
        log.info(f"Total size: {tmp_total_backup_size}, Transferred: {tmp_transferred_size}.")
        log.info(f"All record result: {result_list}.")
        self._total_backup_size = tmp_total_backup_size
        self._transferred_size = tmp_transferred_size
        if not result_list:
            return
        for each_ret in result_list:
            if SaphanaStateName.FAIL in each_ret or SaphanaStateName.CANCELED in each_ret:
                log.error(f"Backup failed. Backup id : {self._backup_id}")
                return
        if self._total_backup_size != 0:
            self.progress = self._transferred_size / self._total_backup_size * 100

    def _get_log_position(self):
        """Query the redo log position of each node for this backup id.

        Returns:
            dict: {hostname: redo_log_position}, e.g. {"hana1": 123};
            empty dict when one host appears twice (illegal catalog state).

        Raises:
            ErrCodeException: when the catalog query fails.
        """
        result = {}
        query_cmd = "select host,redo_log_position from m_backup_catalog_files inner join M_BACKUP_CATALOG on " \
                    f"M_BACKUP_CATALOG_FILES.backup_id={self._backup_id} and " \
                    f"M_BACKUP_CATALOG_FILES.backup_id=M_BACKUP_CATALOG.backup_id where " \
                    f"entry_type_name!='log backup' and source_type_name='volume'"

        ret, output = self._exec_db_cmd(self._backup_db_name, self._backup_db_user, self._backup_db_pwd_env, query_cmd,
                                        SaphanaDbActionType.QUERY_CMD)
        # Fix: check ret before splitting so a None output cannot crash;
        # also fixed the "pisition" typo in the error message.
        if not ret or not output:
            log.error(f"Fail to get redo log position for {self._backup_id}")
            raise ErrCodeException(SaphanaErrorCode.SYSTEM_ERROR, message="Fail to get log position")
        output = output.split('\n')
        if len(output) <= self._min_lines:
            log.error(f"Fail to get redo log position for {self._backup_id}")
            raise ErrCodeException(SaphanaErrorCode.SYSTEM_ERROR, message="Fail to get log position")
        # Skip the echoed command line and the column-header line.
        for item in output[2:]:
            temp_value = item.split(',')
            if len(temp_value) < 2:
                continue
            hostname = temp_value[0].strip("\"")
            # Fix: variable typo redo_log_positioin -> redo_log_position.
            redo_log_position = int(SaphanaClusterManage.erase_tail(temp_value[1]))
            if hostname in result:
                log.error(f"Two record about host {hostname}, it's illegal.")
                return {}
            result[hostname] = redo_log_position
        return result

    def _generate_copy_info(self):
        """Assemble the copy (replica) report model for this backup.

        Returns:
            dict: ReportCopyInfoModel serialized with field aliases.

        Raises:
            ErrCodeException: when the backup start time cannot be queried,
            the per-node log positions cannot be read, or the job carries no
            data repository.
        """
        # Per-node redo log positions, needed for log-backup chaining.
        redo_log_position = self._get_log_position()
        # Backup start time (UTC) of the current backup.
        query_cmd = "select top 1 UTC_START_TIME from M_BACKUP_PROGRESS;"
        ret, output = self._exec_db_cmd(self._backup_db_name, self._backup_db_user, self._backup_db_pwd_env, query_cmd,
                                        SaphanaDbActionType.QUERY_CMD)
        # Fix: check ret before splitting so a None output cannot crash.
        if not ret or not output:
            log.error(f"Fail to get backup position for {self._backup_id}")
            raise ErrCodeException(SaphanaErrorCode.SYSTEM_ERROR, message="Fail to get log position")
        output = output.split('\n')
        if len(output) <= self._min_lines:
            log.error(f"Fail to get backup position for {self._backup_id}")
            raise ErrCodeException(SaphanaErrorCode.SYSTEM_ERROR, message="Fail to get log position")
        backup_timestamp = SaphanaBackupParent.utc_to_timestamp(output[2])
        metadata = {
            SaphanaMetadataKey.BACKUP_ID: self._backup_id,
            SaphanaMetadataKey.SAPHANA_VERSION: self._saphana_cluster.get_version(),
            SaphanaMetadataKey.MULTI_TENANT_SYSTEM: self._saphana_cluster.is_multi_system(),
            SaphanaMetadataKey.MASTER_SYSTEMDB_HOSTNAME:
                self._saphana_cluster.get_master_hostname(self._system_db_pwd_env),
            SaphanaMetadataKey.DB_INFO: self._saphana_cluster.get_db_nodes_and_services_list(self._backup_db_user,
                                                                                             self._backup_db_pwd_env),
            SaphanaMetadataKey.BACKUP_HOSTNAME: socket.gethostname(),
            SaphanaMetadataKey.BACKUP_LOG_POSTION: redo_log_position,
            SaphanaMetadataKey.BACKUP_TIME: backup_timestamp,
            SaphanaMetadataKey.BACKUP_PATH: f"/{self._parse_params_obj.get_copy_id()}",
            SaphanaMetadataKey.COPY_ID: self._parse_params_obj.get_copy_id(),
            SaphanaMetadataKey.SYSTEM_ID: self._sid
        }

        all_repo = self._parse_params_obj.get_all_repositories()
        data_repo = None
        # Keep the LAST matching repository (original behavior).
        for repository in all_repo:
            if repository.get(SaphanaJsonConstant.REPOSITORY_TYPE, "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                data_repo = repository
        # Fix: raise a typed error instead of an AttributeError on .get(...)
        # when no data repository is present in the job parameters.
        if data_repo is None:
            log.error("No data repository found in job repositories.")
            raise ErrCodeException(SaphanaErrorCode.SYSTEM_ERROR, message="No data repository")
        # Repository entry pointing at this copy's directory on the share.
        data_rep_rsp = [
            CopyInfoRepModel(id=data_repo.get(SaphanaJsonConstant.ID),
                             repositoryType=data_repo.get(SaphanaJsonConstant.REPOSITORY_TYPE),
                             isLocal=data_repo.get(SaphanaJsonConstant.IS_LOCAL), protocol="NFS",
                             remotePath=f"{data_repo.get(SaphanaJsonConstant.REMOTE_PATH)}/"
                             f"{self._parse_params_obj.get_copy_id()}",
                             remoteHost=data_repo.get(SaphanaJsonConstant.REMOTE_HOST),
                             extendInfo={
                                 "fsId": data_repo.get(SaphanaJsonConstant.EXTEND_INFO, {}).get("fsId")
                             }).dict(by_alias=True)
        ]
        copy_info = Copy(repositories=data_rep_rsp, extendInfo=metadata, timestamp=backup_timestamp)
        return ReportCopyInfoModel(copy=copy_info, jobId=self._parse_params_obj.get_maintask_id()).dict(by_alias=True)

    def _get_last_backup_id(self):
        """Fetch the newest backup id from the catalog before backup starts
        and store it in self._previous_backup_id.

        For LOG backups the query runs against the system database with the
        system user; otherwise against the backup target database.
        Best effort: failures are logged and swallowed.
        """
        query_cmd = "select top 1 backup_id from M_BACKUP_CATALOG order by backup_id desc"
        system_db_name = SaphanaConst.SYSTEM_DB if self._saphana_cluster.is_multi_system() else self._sid
        temp_db_name = system_db_name if self._backup_type == BackupTypeEnum.LOG_BACKUP else self._backup_db_name
        temp_db_user = self._system_db_user \
            if self._backup_type == BackupTypeEnum.LOG_BACKUP else self._backup_db_user
        temp_pw = self._system_db_pwd_env if self._backup_type == BackupTypeEnum.LOG_BACKUP else self._backup_db_pwd_env
        try:
            ret, output = self._saphana_cluster.execute_query_in_db(temp_db_name, temp_db_user, temp_pw, query_cmd)
        except ErrCodeException as err:
            log.warning(f"Fail to get backup progress for {err}")
            return
        finally:
            # Scrub the sensitive system-db credential copy.
            # NOTE(review): this clears the user name, not temp_pw —
            # confirm whether the password env was the intended target.
            if self._backup_type == BackupTypeEnum.LOG_BACKUP:
                clear(temp_db_user)
        if not ret:
            log.warning(f"Fail to get backup progress for {output}")
            return
        min_lines = 3  # one echoed command line, one header line, one trailing empty line
        output = output.split("\n")
        if len(output) > min_lines:
            self._previous_backup_id = int(SaphanaClusterManage.erase_tail(output[2]))
            log.info(f"Last backup id : {self._previous_backup_id}")

    def _exec_abort_backup(self):
        """Cancel the running complete data backup and wait until HANA no
        longer reports any data backup in running/canceling state.

        Best effort: all failures are logged and swallowed, since abort is
        itself an error-handling path.
        """
        # Find the id of the currently running complete data backup.
        query_cmd = "SELECT BACKUP_ID FROM M_BACKUP_CATALOG WHERE ENTRY_TYPE_NAME = 'complete data backup' " \
                    "AND STATE_NAME = 'running' ORDER BY UTC_START_TIME DESC;"
        ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                self._backup_db_pwd_env, query_cmd)
        output = output.split('\n')
        if not ret or len(output) <= self._min_lines:
            # Fix: dropped the pointless f-prefix on a constant message.
            log.error("Fail to get running backup id.")
            return
        log.info(f"Cancel backup id {output[2]}")
        abort_cmd = f"backup cancel {int(output[2])}"
        ret, _ = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                           self._backup_db_pwd_env, abort_cmd,
                                                           SaphanaDbActionType.PROTECT_CMD)
        if not ret:
            log.error("Exec cancel cmd failed.")
            return
        # Poll until no data backup remains running or canceling.
        # NOTE(review): this loop has no timeout — presumably the sub-job's
        # lifetime is bounded by the caller; confirm.
        while True:
            query_cmd = "SELECT count(*) FROM M_BACKUP_CATALOG WHERE ENTRY_TYPE_NAME != 'log backup' " \
                        f"AND (STATE_NAME = 'running' or STATE_NAME = '{SaphanaStateName.CANCELING}') " \
                        f"ORDER BY UTC_START_TIME DESC;"
            ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                    self._backup_db_pwd_env, query_cmd)
            output = output.split('\n')
            if not ret or len(output) <= self._min_lines:
                # Fix: guard output[1] — the split may yield fewer lines.
                if len(output) > 1 and output[1] and SaphanaStateName.ALREADY_CANCELED in output[1]:
                    log.info("Cancel successfully.")
                    return
                log.error(f"Get cancel state failed for {output}")
                return
            if not int(output[2]):
                log.info("Cancel successfully.")
                return
            time.sleep(self._query_progress_interval)
