#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import shutil
import socket
import time
from abc import ABC, abstractmethod

import psutil

from common.cleaner import clear
from common.common_models import SubJobModel, SubJobDetails, LogDetail
from common.const import BackupTypeEnum, SubJobTypeEnum, SubJobPolicyEnum, SubJobStatusEnum, DBLogLevel, \
    BackupJobResult, RpcToolInterface, CopyDataTypeEnum
from common.enums.common_enums import DeployTypeEnum
from common.exception.common_exception import ErrCodeException
from common.parse_parafile import add_env_param
from generaldb.saphana.backup.saphana_parse_backup_params import SaphanaParseBackupParam, SaphanaCopyInfoParam
from generaldb.saphana.comm.common_util import log, log_start, get_localhost_hostname, get_localhost_register_ip
from generaldb.saphana.comm.saphana_const import SaphanaJsonConstant, SaphanaErrorCode, SaphanaClusterStatus, \
    SaphanaSubjobName, SaphanaTaskLabel, SaphanaRpcParamKey, SaphanaConst, SaphanaDbActionType
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from generaldb.saphana.comm.saphana_resource_param import SaphanaResourceParam
from generaldb.saphana.resources.saphana_cluster_manager import SaphanaClusterManage


class SaphanaBackupParent(ABC):
    def __init__(self, parse_params_obj: SaphanaParseBackupParam, pid):
        """Initialize the common state shared by data/log backup subclasses.

        :param parse_params_obj: parsed backup job parameters; must not be None
        :param pid: job pid, used to derive credential environment variable names
        :raises Exception: when params are missing, the sid fails the OS user
            check, or the backup link path cannot be initialized
        """
        if not parse_params_obj:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._parse_params_obj = parse_params_obj
        self._backup_type = self._parse_params_obj.get_backup_type()
        self._backup_db_name = self._parse_params_obj.get_backup_db_name()
        self._backup_db_user = self._parse_params_obj.get_backup_db_user()
        # Env variable holding the protect-object password for this pid.
        self._backup_db_pwd_env = f"job_protectObject_auth_authPwd_{pid}"
        self._sid = self._parse_params_obj.get_specified_self_define_params(SaphanaJsonConstant.SYSTEM_ID).lower()
        # Validate the sid against the OS user before using it anywhere.
        if not CommonFuction.saphana_check_os_user(self._sid):
            raise Exception("Sid error.")
        self._system_db_port = \
            self._parse_params_obj.get_specified_self_define_params(SaphanaJsonConstant.SYSTEM_DB_PORT)
        self._system_db_user = ""
        self._system_db_pwd_env = ""
        self._pid = pid
        # Must run before SaphanaClusterManage is built: it fills in
        # self._system_db_user which the cluster manager consumes below.
        self._init_system_db_param()
        self._saphana_cluster = SaphanaClusterManage({SaphanaJsonConstant.SYSTEM_ID: self._sid,
                                                      SaphanaJsonConstant.SYSTEM_DB_PORT: self._system_db_port,
                                                      SaphanaJsonConstant.PROTECT_DATABASE: self._backup_db_name,
                                                      SaphanaJsonConstant.SYSTEM_DB_USER: self._system_db_user},
                                                     self._pid)
        self._system_db_name = SaphanaConst.SYSTEM_DB if self._saphana_cluster.is_multi_system() else self._sid
        self._error_code = 0  # error code of the last failed operation
        self._progress = 0
        self._job_status = SubJobStatusEnum.RUNNING
        self._total_backup_size = 0  # total backup size; the database reports it in bytes
        self._log_detail_param = []  # parameters attached to the error code
        self._transferred_size = 0  # amount of data backed up so far
        self._query_progress_interval = 10  # interval between periodic progress queries, 10s
        self._backup_id = 0
        self._min_lines = 3  # non-data lines of a select result: one command line, one header line, one trailing blank line
        self._link_path = ""  # symlink path used during backup
        if not self._init_link_path():
            raise Exception("Init link path failed.")

    @staticmethod
    def utc_to_timestamp(utc_time):
        """Convert a UTC time string from saphana to a unix timestamp.

        Fractional seconds (anything after the first '.') are dropped before
        parsing with the "%Y-%m-%d %H:%M:%S" format.

        :param utc_time: time string as returned by saphana
        :return: integer timestamp, or 0 when the string cannot be parsed
        """
        sys_start_time = SaphanaClusterManage.erase_tail(utc_time)
        # str.split never returns an empty list, so the former
        # "if not ...split('.')" guard was dead code; taking element 0 keeps
        # the whole string anyway when no '.' is present.
        sys_start_time = sys_start_time.split('.')[0]
        try:
            time_stamp = int(time.mktime(time.strptime(sys_start_time, "%Y-%m-%d %H:%M:%S")))
        except Exception:
            log.error(f"Illegal time format: {sys_start_time}")
            time_stamp = 0
        return time_stamp

    @abstractmethod
    def _prepare_backup_storage(self):
        """Prepare the backup storage medium; implemented by each subclass."""
        pass

    @abstractmethod
    def _get_backup_progress(self):
        """Fetch backup progress; data backup and log backup each implement
        their own version."""
        pass

    @abstractmethod
    def _generate_copy_info(self):
        """Generate the copy info; data backup and log backup each implement
        their own version."""
        pass

    @log_start()
    def _backup_catalog(self):
        """Back up the catalog file written for this backup.

        Queries M_BACKUP_CATALOG_FILES for the first catalog entry created
        after self._backup_id and copies that file into the copy's catalog
        directory (log path for log backups, data path/<copy id> otherwise).

        :return: True on success, False on any failure
        """
        if not self._backup_id:
            log.error("After backup, the backup_id was not set.")
            return False
        query_cmd = f"select top 1 destination_path from M_BACKUP_CATALOG_FILES where backup_id > {self._backup_id} " \
                    f"and source_type_name = 'catalog' order by entry_id asc"
        try:
            ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                    self._backup_db_pwd_env, query_cmd)
        except ErrCodeException as err:
            self._error_code = err.error_code
            log.error("Get catalog backup info failed.")
            return False
        if not ret:
            log.error(f"Get catalog backup info failed for {output}")
            return False
        output = output.split("\n")
        # Use the shared class constant instead of re-declaring a local:
        # one command line, one header line, one trailing blank line.
        if len(output) <= self._min_lines:
            log.warning(f"No progress record. Output: {output}")
            return False
        # The first data row sits right after the command and header lines.
        catalog_file_path = self._saphana_cluster.erase_tail(output[2])
        if self._backup_type == BackupTypeEnum.LOG_BACKUP:
            mount_data_path = self._parse_params_obj.get_log_path()
            catalog_path = os.path.join(mount_data_path, "catalog")
        else:
            mount_data_path = self._parse_params_obj.get_data_path()
            catalog_path = os.path.join(mount_data_path, self._parse_params_obj.get_copy_id(), "catalog")
        ret, err = CommonFuction.exec_shell_cmd(f"cp {catalog_file_path} {catalog_path}", need_verify=0)
        if not ret:
            log.error(f"Backup catalog {catalog_file_path} failed for {err}")
            return False
        log.info(f"Backup catalog {catalog_file_path} to {catalog_path} successfully.")
        return True

    @abstractmethod
    def _exec_abort_backup(self):
        """Abort the running backup; each subclass implements its own logic."""
        pass

    @log_start()
    def allow_backup_in_local_node(self):
        """Decide whether this node may execute the task.

        Applies to the main task, the prepare sub job, the backup sub job
        and the post sub job. Prepare/post may run anywhere; the other jobs
        require the cluster to be online, and system-db/log backups (in
        non-single deployments) must run on the master system-db node.
        """
        sub_job_name = self._parse_params_obj.get_subjob_name()
        if sub_job_name in (SaphanaSubjobName.PREPARE, SaphanaSubjobName.POST):
            return True
        # The cluster must be online for the remaining job types.
        try:
            cluster_status = self._saphana_cluster.get_cluster_status()
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        if cluster_status != SaphanaClusterStatus.ONLINE:
            log.error("Cluster is offline.")
            self._error_code = SaphanaErrorCode.CLUSTER_NOT_ONLINE
            return False
        backing_up_system_db = self._backup_db_name.upper() == self._system_db_name.upper()
        single_deploy = self._parse_params_obj.get_deploy_type() == DeployTypeEnum.SINGLE.value
        if backing_up_system_db and not single_deploy:
            # Log and system-database backups only run on the master system-db node.
            master_hostname = self._saphana_cluster.get_master_hostname(self._system_db_pwd_env)
            local_hostname = get_localhost_hostname()
            if master_hostname.upper() != local_hostname.upper():
                log.error(f"Host : {local_hostname} is not master, can not run.")
                return False
        return True

    @log_start()
    def query_job_permission(self):
        """Query the owner of the instance user.

        :return: (uid, gid) of the instance user
        """
        owner_ids = self._saphana_cluster.get_instance_user_group()
        return owner_ids

    @log_start()
    def check_backup_type(self):
        """Check whether the requested backup type can proceed as-is.

        Full and log backups always pass. For incremental/differential
        backups, the previous copy must still be usable: the database
        topology (nodes and services), the single/multi tenant attribute
        and — for a system database — the backup host must be unchanged.
        Any change sets INC_TO_FULL and returns False so the task is
        converted to a full backup.
        """
        if self._backup_type == BackupTypeEnum.FULL_BACKUP or self._backup_type == BackupTypeEnum.LOG_BACKUP:
            return True
        # Decide whether this incremental backup must become a full backup.
        ret, last_copy_info = self._get_last_copy_info()
        if not ret or not last_copy_info:
            log.error(f"Fail to get last copy info: {last_copy_info}")
            self._error_code = SaphanaErrorCode.INC_TO_FULL
            return False
        # 1. Did the database nodes or services change?
        last_db_info = SaphanaCopyInfoParam.get_db_info(last_copy_info)
        current_db_info = self._saphana_cluster.get_db_nodes_and_services_list(self._backup_db_user,
                                                                               self._backup_db_pwd_env)
        log.debug(f"Db info. Last: {last_db_info}, current: {current_db_info}")
        if len(last_db_info) != len(current_db_info) or set(last_db_info.keys()) != set(current_db_info.keys()):
            log.error("Distribute hosts changed.")
            self._error_code = SaphanaErrorCode.INC_TO_FULL
            return False
        for key, value in last_db_info.items():
            current_db_value = current_db_info[key]
            if set(value) != set(current_db_value):
                # Log this host's current services (the old code dumped the whole db info).
                log.error(f"Db service on host {key} changed. Last: {value}, current: {current_db_value}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        # 2. Did the single/multi tenant attribute change?
        last_system_attr = SaphanaCopyInfoParam.get_system_attr(last_copy_info)
        if last_system_attr is None:
            log.warning("Fail to get last copy system attribute.")
        else:
            current_system_attr = self._saphana_cluster.is_multi_system()
            if last_system_attr != current_system_attr:
                # Log the new attribute value (the old code logged the unrelated db info).
                log.error(f"System attr changed from {last_system_attr} to {current_system_attr}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        # For a system database backup, also detect a master/standby switchover.
        if self._saphana_cluster.is_system_db():
            last_backup_hostname = SaphanaCopyInfoParam.get_last_backup_hostname(last_copy_info)
            if last_backup_hostname != socket.gethostname():
                log.warning(f"Last backup hostname is {last_backup_hostname}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        return True

    @log_start()
    def exec_prerequisite_task(self):
        """Run the prerequisite checks before backup.

        In order: tenant database running, credentials can log in, all db
        services up, enough agents available, backup storage prepared.

        :return: True when every check passes; on failure sets
            self._error_code (and self._log_detail_param where applicable)
        """
        # For a tenant database, make sure it is actually running first.
        if not self._saphana_cluster.is_system_db() and \
                not self._saphana_cluster.get_database_running_status(self._system_db_pwd_env):
            log.error(f"DB {self._backup_db_name} not running.")
            self._error_code = SaphanaErrorCode.DB_NOT_RUNNING
            return False
        # Verify the backup credentials can log in to this database.
        try:
            login_ok = self._saphana_cluster.login_database(self._backup_db_user, self._backup_db_pwd_env)
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        if not login_ok:
            self._error_code = SaphanaErrorCode.ACCESS_DB_ERROR
            return False
        log.info(f"Login {self._backup_db_name} successfully")
        # Every service of the database must be running.
        try:
            services_ok, service_detail = self._saphana_cluster.all_db_services_running(
                self._backup_db_user, self._backup_db_pwd_env)
        except ErrCodeException as err:
            self._error_code = err.error_code
            self._log_detail_param = err.parameter_list
            return False
        if not services_ok:
            log.error("Not all database services running.")
            self._error_code = SaphanaErrorCode.NOT_ALL_DB_SERVICE_RUNNING
            self._log_detail_param = service_detail
            return False
        log.info("All db services are running.")
        # Enough agents must be available to execute the backup.
        if not self.check_agent():
            return False
        # Finally, prepare the storage medium.
        if not self._prepare_backup_storage():
            return False
        log.info("Prepare backup storage successfully.")
        return True

    def check_agent(self):
        """Check that enough agents are available to run the backup.

        Single-node deployments always pass. On failure, records the error
        code/parameters (when available) and returns False.
        """
        if self._parse_params_obj.get_deploy_type() == DeployTypeEnum.SINGLE.value:
            return True
        agent_ips = self._parse_params_obj.get_all_agents_ip()
        if not agent_ips:
            log.error("Fail to get agents ip")
            return False
        log.debug(f"All agents ip : {agent_ips}")
        try:
            self._saphana_cluster.check_agent_enough(agent_ips, self._backup_db_user, self._backup_db_pwd_env,
                                                     self._parse_params_obj.get_all_agents_register_ip())
        except ErrCodeException as err:
            self._error_code = err.error_code
            self._log_detail_param = err.parameter_list
            log.error(f"Check agent enough fail for {err.error_message}")
            return False
        except Exception as common_err:
            log.error(f"Check agent enough fail for {common_err}")
            return False
        log.info("Check agents enough.")
        return True

    @log_start()
    def generate_job(self):
        """Split the main task into sub jobs.

        Produces one backup sub job (priority 2) and one prepare sub job
        (priority 1, executed once on every node).
        """
        main_task_id = self._parse_params_obj.get_maintask_id()
        backup_job = SubJobModel(jobId=main_task_id, subJobId="",
                                 jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                 jobName=SaphanaSubjobName.BACKUP, jobPriority=2,
                                 ignoreFailed=False, jobInfo="").dict(by_alias=True)
        prepare_job = SubJobModel(jobId=main_task_id, subJobId="",
                                  jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                  jobName=SaphanaSubjobName.PREPARE, jobPriority=1,
                                  policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME,
                                  ignoreFailed=False, jobInfo="").dict(by_alias=True)
        log.info(f"Gen sub job success. {self._parse_params_obj.get_log_common()}")
        return [backup_job, prepare_job]

    @log_start()
    def exec_prepare_subtask(self):
        """Prepare sub job: link the backup data directory to the instance
        link path and give the link to the instance owner.

        :return: True on success, False on any failure
        """
        is_log_backup = self._backup_type == BackupTypeEnum.LOG_BACKUP
        mount_data_path = self._parse_params_obj.get_log_path() if is_log_backup \
            else self._parse_params_obj.get_data_path()
        if not mount_data_path:
            log.error(f"No usable data path({mount_data_path})")
            return False
        copy_id = self._parse_params_obj.get_copy_id()
        if not copy_id:
            return False
        if is_log_backup:
            data_path = os.path.join(mount_data_path, "data")
        else:
            data_path = os.path.join(mount_data_path, copy_id, "data")
        # Drop any stale link left over from a previous run before relinking.
        if not self._remove_backup_link():
            return False
        ret, err = CommonFuction.exec_shell_cmd(f"ln -s {data_path} {self._link_path}", need_verify=0)
        if not ret:
            log.error(f"Create link failed for {err},{data_path},{self._link_path}")
            return False
        # lchown: change ownership of the link itself, not its target.
        cluster_uid, cluster_gid = self._saphana_cluster.get_instance_user_group()
        os.lchown(self._link_path, cluster_uid, cluster_gid)
        log.info("Exec prepare task successfully.")
        return True

    @log_start()
    def exec_post_job(self):
        """Post job: remove the backup symlink and, when the backup failed,
        delete this copy's data from the data and cache repositories."""
        # 1. Always drop the symlink created by the prepare sub job.
        self._remove_backup_link()
        # Cleanup is only needed when the backup failed.
        backup_result = self._parse_params_obj.get_backup_result()
        if backup_result != BackupJobResult.FAIL:
            return
        if self._backup_type == BackupTypeEnum.LOG_BACKUP:
            mount_data_path = self._parse_params_obj.get_log_path()
        else:
            mount_data_path = self._parse_params_obj.get_data_path()
        cache_path = self._parse_params_obj.get_cache_path()
        copy_id = self._parse_params_obj.get_copy_id()
        if not copy_id or not mount_data_path:
            # Guard: with an empty copy_id, os.path.join would resolve to the
            # repository root and shutil.rmtree would wipe the whole mount.
            log.warning(f"Skip cleanup: copy_id({copy_id}) or mount path({mount_data_path}) is empty.")
            return
        data_path = os.path.join(mount_data_path, copy_id)
        if os.path.exists(data_path):
            shutil.rmtree(data_path)
        cache_copy_path = os.path.join(cache_path, copy_id)
        if os.path.exists(cache_copy_path):
            shutil.rmtree(cache_copy_path)

    def get_error_code(self):
        """Return the error code recorded by the last failed operation (0 when none)."""
        return self._error_code

    def get_error_params(self):
        """Return the parameter list associated with the recorded error code."""
        return self._log_detail_param

    def report_copy_info(self):
        """Generate the copy info and report it through the rpc tool.

        :return: True on success; False on failure (records the error code
            when the generator raises an ErrCodeException)
        """
        try:
            copy_info = self._generate_copy_info()
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        log.debug(f"Copy info: {copy_info}")
        if not copy_info:
            log.error("Fail to get copy info.")
            return False
        main_task_id = self._parse_params_obj.get_maintask_id()
        report_ok = CommonFuction.exec_rc_tool_cmd(RpcToolInterface.REPORT_COPY_INFO, copy_info, main_task_id)
        if not report_ok:
            log.error(f"Fail to report copy info({copy_info})")
        return report_ok

    def abort_backup(self):
        """Abort the running backup, then kill every other process sharing
        this job id (the aborting process itself is excluded)."""
        main_task_id = self._parse_params_obj.get_maintask_id()
        log.info(f"Start to abort task {main_task_id}")
        self._exec_abort_backup()
        # Kill all same-jobID worker processes except the abort process.
        for work_pid in CommonFuction.filter_pids(main_task_id, self._pid):
            try:
                psutil.Process(work_pid).kill()
            except Exception as err:
                log.warning(f"Get process err: {err}.")
                continue
            log.debug(f"kill process: {work_pid}.")

    def _init_link_path(self):
        """Compute the backup symlink path: <instance dir>/data_<copy id>.

        :return: True on success, False when the instance dir is unavailable
        """
        try:
            instance_dir = self._saphana_cluster.get_dir_instance()
        except Exception as exception_str:
            log.error(f"Get dir instance failed for {exception_str}")
            return False
        copy_id = self._parse_params_obj.get_copy_id()
        self._link_path = os.path.join(instance_dir, f"data_{copy_id}")
        return True

    def _init_system_db_param(self):
        """Initialize the system-database credential parameters.

        When the protect object is itself the system db (no separate system
        db info), reuse the backup credentials. Otherwise read the system db
        user/password from the custom params, export the password into a
        pid-keyed env variable and wipe the plaintext copy from memory.
        """
        system_db_info = self._parse_params_obj.get_system_db_info()
        if not system_db_info:
            log.warning("This protect object is a system db.")
            self._system_db_user = self._backup_db_user
            self._system_db_pwd_env = self._backup_db_pwd_env
            return
        custom_param_dict = SaphanaResourceParam.init_custom_params(system_db_info)
        # Membership test directly on the dict; ".keys()" was redundant.
        if SaphanaJsonConstant.SYSTEM_DB_PASSWORD not in custom_param_dict or \
                SaphanaJsonConstant.SYSTEM_DB_USER not in custom_param_dict:
            log.error("No system db params.")
            return
        self._system_db_pwd_env = f"{SaphanaJsonConstant.SYSTEM_DB_PASSWORD}_{self._pid}"
        self._system_db_user = custom_param_dict[SaphanaJsonConstant.SYSTEM_DB_USER]
        add_env_param(self._system_db_pwd_env, custom_param_dict[SaphanaJsonConstant.SYSTEM_DB_PASSWORD])
        # Clear the plaintext password once it has been exported to the env.
        clear(custom_param_dict[SaphanaJsonConstant.SYSTEM_DB_PASSWORD])

    def _upload_backup_progress(self):
        """Periodically report backup progress until the job leaves RUNNING.

        Each round: refresh state via _get_backup_progress(), build a
        LogDetail matching the current status (failed / completed / running),
        report it through the rpc tool, then sleep
        self._query_progress_interval seconds. The stop log below refers to
        this loop as the "report progress thread".
        """
        register_ip = get_localhost_register_ip(self._parse_params_obj.get_all_agents_register_ip())
        while True:
            self._get_backup_progress()
            # Snapshot status/error so one consistent view is reported this round.
            temp_job_status = self._job_status
            temp_logdetail = self._error_code if self._error_code else None
            if temp_job_status == SubJobStatusEnum.FAILED:
                log.error("Backup fail.")
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_FAIL_LABEL,
                                       logInfoParam=[f"{self._parse_params_obj.get_subtask_id()}"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.ERROR,
                                       logDetail=temp_logdetail,
                                       logDetailParam=self._log_detail_param)
            elif temp_job_status == SubJobStatusEnum.COMPLETED:
                log.info("Backup complete.")
                # Sizes are tracked in bytes (see __init__), hence /1024/1024 for MB.
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_SUCCESS_LABEL,
                                       logInfoParam=[f"{self._parse_params_obj.get_subtask_id()}",
                                                     f"{int(self._total_backup_size / 1024 / 1024)} MB"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.INFO)
            else:
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_RUNNING_LABEL,
                                       logInfoParam=[register_ip, f"{self._parse_params_obj.get_subtask_id()}",
                                                     f"{int(self._transferred_size / 1024 / 1024)} MB"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.INFO)
            # dataSize is bytes/1024 — presumably KB is expected by the receiver; TODO confirm.
            progress_dict = SubJobDetails(taskId=self._parse_params_obj.get_maintask_id(),
                                          subTaskId=self._parse_params_obj.get_subtask_id(),
                                          taskStatus=temp_job_status, progress=self._progress,
                                          dataSize=self._total_backup_size / 1024, logDetail=[log_detail])
            log.debug("Running send.")
            CommonFuction.report_job_details(self._parse_params_obj.get_maintask_id(),
                                             progress_dict.dict(by_alias=True))
            if temp_job_status != SubJobStatusEnum.RUNNING:
                log.debug(f"Stop report progress thread. current job status is : {temp_job_status}")
                break
            time.sleep(self._query_progress_interval)

    def _remove_backup_link(self):
        """Remove the backup symlink when one exists.

        :return: True when there is nothing to remove or the unlink
            succeeds; False when the unlink command fails
        """
        log.info("Start to remove backup link.")
        if not os.path.islink(self._link_path):
            # Nothing left over from a previous run.
            return True
        ret, _ = CommonFuction.exec_shell_cmd(f"unlink {self._link_path}", need_verify=0)
        if ret:
            return True
        log.error("Link already exists and unlink failed.")
        return False

    def _query_previous_copy_info(self, copy_types: list[CopyDataTypeEnum]) -> dict:
        """Query the previous copy of any of the given types via the rpc tool.

        :param copy_types: acceptable copy data types for the lookup
        :return: the previous copy info dict, or {} when none is found
        """
        input_param = {
            SaphanaRpcParamKey.APPLICATION: self._parse_params_obj.get_protect_object(),
            SaphanaRpcParamKey.TYPES: copy_types,
            SaphanaRpcParamKey.COPY_ID: self._parse_params_obj.get_copy_id()
        }
        # The bare "previous_copy_info: dict" annotation statement that
        # followed this call was a no-op and has been removed.
        ret, previous_copy_info = CommonFuction.exec_rc_tool_cmd(
            RpcToolInterface.QUERY_PREVIOUS_COPY, input_param, self._parse_params_obj.get_maintask_id())
        if not ret or not previous_copy_info:
            log.error(f"Fail to get last copy info, copy types: {copy_types}.")
            return {}
        return previous_copy_info

    def _get_last_copy_info(self):
        """Query the most recent copy this backup can be based on.

        The acceptable copy types depend on the backup type: a differential
        backup accepts only full copies; an incremental backup accepts full
        and incremental copies; anything else also accepts differential ones.
        """
        if self._backup_type == BackupTypeEnum.DIFF_BACKUP:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value]
        elif self._backup_type == BackupTypeEnum.INCRE_BACKUP:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value, CopyDataTypeEnum.INCREMENT_COPY.value]
        else:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value, CopyDataTypeEnum.INCREMENT_COPY.value,
                              CopyDataTypeEnum.DIFF_COPY.value]
        input_param = {
            SaphanaRpcParamKey.APPLICATION: self._parse_params_obj.get_protect_object(),
            SaphanaRpcParamKey.TYPES: last_copy_type,
            SaphanaRpcParamKey.COPY_ID: self._parse_params_obj.get_copy_id(),
            SaphanaRpcParamKey.JOB_ID: self._parse_params_obj.get_maintask_id()
        }
        return CommonFuction.exec_rc_tool_cmd(RpcToolInterface.QUERY_PREVIOUS_COPY, input_param,
                                              self._parse_params_obj.get_maintask_id())

    def _exec_db_cmd(self, db_name, db_user, db_pwd, cmd, ac_type=SaphanaDbActionType.QUERY_CMD):
        """Execute a command in the given database.

        Marks the sub job failed on any exception; an ErrCodeException also
        records its error code.

        :return: (success flag, command output); output is "" on exception
        """
        try:
            ret, output = self._saphana_cluster.execute_query_in_db(db_name, db_user, db_pwd, cmd, ac_type)
        except ErrCodeException as err:
            self._job_status = SubJobStatusEnum.FAILED
            self._error_code = err.error_code
            log.error(f"Exec cmd {cmd} failed for {err.error_message}")
            return False, ""
        except Exception as unknown_err:
            self._job_status = SubJobStatusEnum.FAILED
            log.error(f"Exec cmd {cmd} failed for {unknown_err}")
            return False, ""
        if not ret:
            log.error(f"Exec cmd {cmd} failed for {output}")
        return ret, output
