#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import shutil
import socket
import stat
import time
from abc import ABC, abstractmethod
from datetime import datetime, timezone
import pwd

import psutil

from common.cleaner import clear
from common.common import output_execution_result_ex, check_path_legal
from common.common_models import SubJobModel, SubJobDetails, LogDetail
from common.const import BackupTypeEnum, SubJobTypeEnum, SubJobPolicyEnum, SubJobStatusEnum, DBLogLevel, \
    BackupJobResult, RpcToolInterface, CopyDataTypeEnum, AuthType
from common.enums.common_enums import DeployTypeEnum
from common.exception.common_exception import ErrCodeException
from common.parse_parafile import add_env_param
from saphana.backup.saphana_parse_backup_params import SaphanaParseBackupParam, SaphanaCopyInfoParam
from generaldb.saphana.comm.common_util import log, log_start, get_localhost_hostname, get_localhost_register_ip
from generaldb.saphana.comm.saphana_const import SaphanaJsonConstant, SaphanaErrorCode, SaphanaClusterStatus, \
    SaphanaSubjobName, SaphanaTaskLabel, SaphanaRpcParamKey, SaphanaConst, SaphanaDbActionType
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from saphana.resource.saphana_cluster_manager import SaphanaClusterManage
from saphana.common.saphana_const import SapConst
from saphana.resource.common_util import HanaCommonUtil


class SaphanaBackupParent(ABC):
    def __init__(self, parse_params_obj: SaphanaParseBackupParam, pid, job_id, sub_job_id):
        """Initialize the backup task state from the parsed job parameters.

        :param parse_params_obj: parsed backup parameters; must be non-null
        :param pid: process id, used to namespace env-variable/param-file keys
        :param job_id: main task id
        :param sub_job_id: sub task id
        :raises Exception: when parse_params_obj is falsy
        """
        if not parse_params_obj:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._parse_params_obj = parse_params_obj
        self._backup_type = self._parse_params_obj.get_backup_type()
        self._backup_db_name = self._parse_params_obj.get_backup_db_name()
        self._backup_db_user = self._parse_params_obj.get_backup_db_user()
        self._backup_db_pwd_env = self._parse_params_obj.get_backup_db_pwd_env()
        self._sid = self._parse_params_obj.get_sid()
        self._system_db_port = self._parse_params_obj.get_system_db_port()
        self._system_db_user = ""
        self._system_db_pwd_env = ""
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._systemdb_auth_type = int(self._parse_params_obj.get_systemdb_auth_type())
        self._backupdb_auth_type = int(self._parse_params_obj.get_protectdb_auth_type())
        self._init_system_db_param()
        self._saphana_cluster = SaphanaClusterManage({SaphanaJsonConstant.SYSTEM_ID: self._sid,
                                                      SaphanaJsonConstant.SYSTEM_DB_PORT: self._system_db_port,
                                                      SaphanaJsonConstant.PROTECT_DATABASE: self._backup_db_name,
                                                      SaphanaJsonConstant.SYSTEM_DB_USER: self._system_db_user,
                                                      SaphanaJsonConstant.SYSTEMDB_AUTH_TYPE: self._systemdb_auth_type,
                                                      SaphanaJsonConstant.PROTECTDB_AUTH_TYPE:
                                                          self._backupdb_auth_type},
                                                     self._pid)
        self._error_code = 0  # error code of the last failure, 0 means no error
        self.progress = 0
        self._job_status = SubJobStatusEnum.RUNNING
        self._total_backup_size = 0  # total backup data size; the database reports it in bytes
        self._log_detail_param = []  # parameters attached to the error code
        self._transferred_size = 0  # amount of data backed up so far
        self._query_progress_interval = 30  # interval between periodic progress queries, 30s
        self._previous_backup_id = 0  # backup_id of the last successful backup
        self._backup_id = 0  # id of this backup; for log backup, the backup id of the last log copy
        self._min_lines = 3  # non-data lines in select output: one command line, one header line, one trailing blank
        self.backup_start_time = time.time()  # backup task start time
        self.backup_end_time = 0  # backup task end time (original comment wrongly said start time)

    @staticmethod
    def utc_to_timestamp(utc_time):
        """Convert a UTC time string fetched from SAP HANA to a Unix timestamp.

        :param utc_time: time string such as "2024-01-02 03:04:05.123456"
        :return: integer epoch seconds, or 0 when the string cannot be parsed
        """
        sys_start_time = SaphanaClusterManage.erase_tail(utc_time)
        # str.split('.') always yields at least one element, so the original
        # "if not split(...)" guard was dead code: simply drop any fractional
        # seconds by keeping the part before the first dot.
        sys_start_time = sys_start_time.split('.')[0]
        try:
            time_stamp = int(datetime.strptime(sys_start_time, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc)
                             .timestamp())
        except (ValueError, TypeError):
            # strptime raises ValueError on bad format, TypeError on non-str.
            log.error(f"Illegal time format: {sys_start_time}")
            time_stamp = 0
        return time_stamp

    @abstractmethod
    def _prepare_backup_storage(self):
        # Prepare the backup storage medium; implemented by subclasses.
        pass

    @abstractmethod
    def _get_backup_progress(self):
        # Query backup progress; data backup and log backup implement this separately.
        pass

    @abstractmethod
    def _generate_copy_info(self):
        # Generate copy info; data backup and log backup implement this separately.
        pass

    @abstractmethod
    def _exec_abort_backup(self):
        # Abort logic; each subclass implements its own handling.
        pass

    @log_start()
    def allow_backup_in_local_node(self):
        """Decide whether this node may execute the task.

        Applies to the main task and the prepare/backup/post sub jobs: prepare
        and post sub jobs may run anywhere; the rest require an online cluster
        and, for non-single deployments, the master system-database node.
        """
        sub_job_name = self._parse_params_obj.get_subjob_name()
        if sub_job_name in (SaphanaSubjobName.PREPARE, SaphanaSubjobName.POST):
            return True
        # The cluster must be online before any backup work can run here.
        try:
            cluster_state = self._saphana_cluster.get_cluster_status()
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        if cluster_state != SaphanaClusterStatus.ONLINE:
            log.error("Cluster is offline.")
            self._error_code = SaphanaErrorCode.CLUSTER_NOT_ONLINE
            return False
        if self._parse_params_obj.get_deploy_type() == DeployTypeEnum.SINGLE.value:
            return True
        # Log backup and backint-style data backup run only on the node that
        # hosts the master system database.
        primary_host = self._saphana_cluster.get_master_hostname(self._system_db_pwd_env)
        this_host = get_localhost_hostname()
        if primary_host.upper() != this_host.upper():
            log.error(f"Host : {this_host} is not master, can not run.")
            return False
        return True

    @log_start()
    def query_job_permission(self):
        """Return the owner of the instance user.

        :return: (uid, gid) tuple
        """
        owner = self._saphana_cluster.get_instance_user_group()
        return owner

    @log_start()
    def check_backup_type(self):
        """Check whether this backup type may proceed without converting to full.

        Full and log backups always pass. For incremental/diff backups the
        previous copy must exist and the database topology (nodes/services),
        the single/multi-tenant attribute, and — for the system database — the
        backup host must all be unchanged; otherwise sets INC_TO_FULL and
        returns False.
        """
        if self._backup_type in (BackupTypeEnum.FULL_BACKUP, BackupTypeEnum.LOG_BACKUP):
            return True
        # Decide whether the incremental backup must be converted to full.
        ret, last_copy_info = self._get_last_copy_info()
        if not ret or not last_copy_info:
            log.error(f"Fail to get last copy info: {last_copy_info}")
            self._error_code = SaphanaErrorCode.INC_TO_FULL
            return False
        # 1. Check whether the database nodes or services changed.
        last_db_info = SaphanaCopyInfoParam.get_db_info(last_copy_info)
        current_db_info = self._saphana_cluster.get_db_nodes_and_services_list(self._backup_db_user,
                                                                               self._backup_db_pwd_env)
        log.info(f"Db info. Last: {last_db_info}, current: {current_db_info}")
        if len(last_db_info) != len(current_db_info) or set(last_db_info.keys()) != set(current_db_info.keys()):
            log.error("Distribute hosts changed.")
            self._error_code = SaphanaErrorCode.INC_TO_FULL
            return False
        for key, value in last_db_info.items():
            current_db_value = current_db_info.get(key)
            if set(value) != set(current_db_value):
                log.error(f"Db service on host {key} changed. Last: {value}, current: {current_db_value}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        # 2. Check whether the single/multi-tenant attribute changed.
        last_system_attr = SaphanaCopyInfoParam.get_system_attr(last_copy_info)
        if last_system_attr is None:
            log.warning("Fail to get last copy system attribute.")
        else:
            current_system_attr = self._saphana_cluster.is_multi_system()
            if last_system_attr != current_system_attr:
                # Bug fix: the original message logged current_db_info (the
                # node/service map) instead of the current system attribute.
                log.error(f"System attr changed from {last_system_attr} to {current_system_attr}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        # For system database backup, check whether a primary/standby switch happened.
        if self._saphana_cluster.is_system_db():
            last_backup_hostname = SaphanaCopyInfoParam.get_last_backup_hostname(last_copy_info)
            if last_backup_hostname != socket.gethostname():
                log.warning(f"Last backup hostname is {last_backup_hostname}")
                self._error_code = SaphanaErrorCode.INC_TO_FULL
                return False
        return True

    @log_start()
    def exec_prerequisite_task(self):
        """Run the prerequisite task.

        Verifies that the database is running (tenant DBs), reachable and
        healthy, that enough agents are available, and that the backup storage
        is prepared. Returns False (with self._error_code set) on any failure.
        """
        # For a tenant database, make sure it is running first.
        if not self._saphana_cluster.is_system_db() and \
                not self._saphana_cluster.get_database_running_status(self._system_db_pwd_env):
            log.error(f"DB {self._backup_db_name} not running.")
            self._error_code = SaphanaErrorCode.DB_NOT_RUNNING
            return False
        # Verify that we can log in to this database.
        try:
            login_ok = self._saphana_cluster.login_database(self._backup_db_user, self._backup_db_pwd_env)
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        if not login_ok:
            self._error_code = SaphanaErrorCode.ACCESS_DB_ERROR
            return False
        log.info(f"Login {self._backup_db_name} successfully")
        # Verify that every database service is running.
        try:
            services_ok, services_output = self._saphana_cluster.all_db_services_running(
                self._backup_db_user, self._backup_db_pwd_env)
        except ErrCodeException as err:
            self._error_code = err.error_code
            self._log_detail_param = err.parameter_list
            return False
        if not services_ok:
            log.error("Not all database services running.")
            self._error_code = SaphanaErrorCode.NOT_ALL_DB_SERVICE_RUNNING
            self._log_detail_param = services_output
            return False
        log.info("All db services are running.")
        # Make sure enough agents are available to run the backup.
        if not self.check_agent():
            return False
        # Prepare the storage medium.
        if not self._prepare_backup_storage():
            return False
        log.info("Prepare backup storage successfully.")
        return True

    def check_agent(self):
        """Verify that enough backup agents are available for this job."""
        agents_list = self._parse_params_obj.get_all_agents_ip()
        if not agents_list:
            log.error("Fail to get agents ip")
            return False
        log.info(f"All agents ip : {agents_list}")
        register_ips = self._parse_params_obj.get_all_agents_register_ip()
        try:
            self._saphana_cluster.check_agent_enough(agents_list, self._backup_db_user,
                                                     self._backup_db_pwd_env, register_ips)
        except ErrCodeException as err:
            self._error_code = err.error_code
            self._log_detail_param = err.parameter_list
            log.error(f"Check agent enough fail for {err.error_message}")
            return False
        except Exception as common_err:
            log.error(f"Check agent enough fail for {common_err}")
            return False
        log.info("Check agents enough.")
        return True

    @log_start()
    def generate_job(self):
        """Split the main task into sub jobs.

        :return: list with a backup sub job (priority 2) followed by a prepare
            sub job (priority 1) that runs once on every node
        """
        main_task_id = self._parse_params_obj.get_maintask_id()
        backup_job = SubJobModel(jobId=main_task_id, subJobId="",
                                 jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                 jobName=SaphanaSubjobName.BACKUP, jobPriority=2,
                                 ignoreFailed=False, jobInfo="").dict(by_alias=True)
        # The prepare sub job must run once on every node.
        prepare_job = SubJobModel(jobId=main_task_id, subJobId="",
                                  jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                  jobName=SaphanaSubjobName.PREPARE, jobPriority=1,
                                  policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME,
                                  ignoreFailed=False, jobInfo="").dict(by_alias=True)
        log.info(f"Gen sub job success. {self._parse_params_obj.get_log_common()}")
        return [backup_job, prepare_job]

    def prepare_param_for_backint(self):
        """Write the parameter file consumed by the backint tool and hand its
        ownership over to the instance user."""
        copy_id = self._parse_params_obj.get_copy_id()
        data_path = os.path.join(self._parse_params_obj.get_data_path(), copy_id)
        param = {
            "pid": self._pid,
            "job_id": self._job_id,
            "sub_job_id": self._sub_job_id,
            "db_name": self._backup_db_name,
            "data_path": data_path,
            "cache_path": self._parse_params_obj.get_cache_path(),
            "meta_path": self._parse_params_obj.get_meta_path(),
        }

        log.info(f"data_path: {os.path.realpath(data_path)}")
        file_name = SapConst.BACKUP_PARAM_FILE.format(
            self._saphana_cluster.db_name_prefix() + self._backup_db_name)
        file_path = os.path.join(SapConst.SAPHANA_PARAM_PATH, file_name)
        output_execution_result_ex(file_path, param)
        # The backint tool runs as the instance user, so it must own the file.
        owner_uid, owner_gid = self._saphana_cluster.get_instance_user_group()
        os.chown(file_path, uid=owner_uid, gid=owner_gid)

    @log_start()
    def exec_prepare_subtask(self):
        """Run the prepare sub task: validate the backint tool, write the
        parameter file it reads, and clear backint.conf when log backup is off."""
        instance_dir = self._saphana_cluster.get_dir_instance()
        if not HanaCommonUtil.check_backint_tool(self._sid, instance_dir):
            return False
        # Write the parameter file for backint to use.
        self.prepare_param_for_backint()
        # When log backup is disabled, truncate backint.conf so no stale
        # log-backup configuration remains.
        if not self._parse_params_obj.get_log_repo():
            hdbconfig_file = f"{instance_dir}/{SapConst.BACKINT_CONFIG_SUFFIX}"
            with open(hdbconfig_file, "w"):
                pass
        return True

    @log_start()
    def exec_post_job(self):
        """Run the post task.

        Removes the backint parameter file and, when the backup failed, deletes
        this copy's data (or log) repository and cache repository. The DB user
        may be unreachable in a degraded environment, so only file cleanup is done.
        """
        # The parameter file may use either the plain db name or the DB_ prefix.
        possible_file_names = [
            SapConst.BACKUP_PARAM_FILE.format(self._backup_db_name),
            SapConst.BACKUP_PARAM_FILE.format(f"DB_{self._backup_db_name}")
        ]
        for file_name in possible_file_names:
            file_path = os.path.join(SapConst.SAPHANA_PARAM_PATH, file_name)
            if os.path.exists(file_path) and check_path_legal(file_path, SapConst.SAPHANA_PARAM_PATH):
                os.remove(file_path)
        if self._parse_params_obj.get_backup_result() != BackupJobResult.FAIL:
            return
        # On failure, clean up the copy's repositories.
        if self._backup_type == BackupTypeEnum.LOG_BACKUP:
            repo_root = self._parse_params_obj.get_log_path()
        else:
            repo_root = self._parse_params_obj.get_data_path()
        copy_id = self._parse_params_obj.get_copy_id()
        data_path = os.path.join(repo_root, copy_id)
        if os.path.exists(data_path):
            shutil.rmtree(data_path)
        cache_copy_path = os.path.join(self._parse_params_obj.get_cache_path(), copy_id)
        if os.path.exists(cache_copy_path):
            shutil.rmtree(cache_copy_path)

    def get_error_code(self):
        # Return the last recorded error code (0 means no error).
        return self._error_code

    def get_error_params(self):
        # Return the parameter list attached to the last error code.
        return self._log_detail_param

    def report_copy_info(self):
        """Generate the copy info and report it through the RPC tool.

        :return: True when the copy info was generated and reported successfully
        """
        try:
            copy_info = self._generate_copy_info()
        except ErrCodeException as err:
            self._error_code = err.error_code
            return False
        log.info(f"Copy info: {copy_info}.")
        if not copy_info:
            log.error("Fail to get copy info.")
            return False
        ret = CommonFuction.exec_rc_tool_cmd(RpcToolInterface.REPORT_COPY_INFO, copy_info,
                                             self._parse_params_obj.get_maintask_id())
        if not ret:
            # Bug fix: previously the success message was logged even when
            # reporting failed.
            log.error(f"Fail to report copy info({copy_info})")
            return ret
        log.info("Report copy info successfully.")
        return ret

    def abort_backup(self):
        """Abort the backup and kill sibling worker processes of the same job id,
        sparing the aborting process itself."""
        main_task_id = self._parse_params_obj.get_maintask_id()
        log.info(f"Start to abort task {main_task_id}")
        self._exec_abort_backup()
        # Kill every other process that carries this job id.
        for worker_pid in CommonFuction.filter_pids(main_task_id, self._pid):
            try:
                psutil.Process(worker_pid).kill()
            except Exception as err:
                log.warning(f"Get process err: {err}.")
                continue
            log.info(f"kill process: {worker_pid}.")

    def _exec_db_cmd(self, db_name, db_user, db_pwd, cmd, ac_type=SaphanaDbActionType.QUERY_CMD):
        """Execute a command in the given database.

        On any exception the sub job is marked FAILED and (False, "") is
        returned; otherwise the (success, output) pair from the cluster manager
        is returned unchanged.
        """
        try:
            success, result = self._saphana_cluster.execute_query_in_db(db_name, db_user, db_pwd, cmd, ac_type)
        except ErrCodeException as err:
            self._error_code = err.error_code
            self._job_status = SubJobStatusEnum.FAILED
            log.error(f"Exec cmd {cmd} failed for {err.error_message}")
            return False, ""
        except Exception as unknown_err:
            log.error(f"Exec cmd {cmd} failed for {unknown_err}")
            self._job_status = SubJobStatusEnum.FAILED
            return False, ""
        if not success:
            log.error(f"Exec cmd {cmd} failed for {result}")
        return success, result

    def _query_previous_copy_info(self, copy_types: list[CopyDataTypeEnum]) -> dict:
        """Query the previous copy info for the given copy types via the RPC tool.

        :param copy_types: copy type values to search among
        :return: the previous copy info dict, or {} when none was found
        """
        request = {
            SaphanaRpcParamKey.APPLICATION: self._parse_params_obj.get_protect_object(),
            SaphanaRpcParamKey.TYPES: copy_types,
            SaphanaRpcParamKey.COPY_ID: self._parse_params_obj.get_copy_id()
        }
        ok, previous_copy_info = CommonFuction.exec_rc_tool_cmd(
            RpcToolInterface.QUERY_PREVIOUS_COPY, request, self._parse_params_obj.get_maintask_id())
        if ok and previous_copy_info:
            return previous_copy_info
        log.error(f"Fail to get last copy info, copy types: {copy_types}.")
        return {}

    def _get_last_copy_info(self):
        """Query the most recent copy this backup may be based on.

        Diff backups chain only off full copies; incremental backups off full
        or incremental copies; any other backup type may use full, incremental
        or diff copies.
        """
        if self._backup_type == BackupTypeEnum.DIFF_BACKUP:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value]
        elif self._backup_type == BackupTypeEnum.INCRE_BACKUP:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value, CopyDataTypeEnum.INCREMENT_COPY.value]
        else:
            last_copy_type = [CopyDataTypeEnum.FULL_COPY.value, CopyDataTypeEnum.INCREMENT_COPY.value,
                              CopyDataTypeEnum.DIFF_COPY.value]
        input_param = {
            SaphanaRpcParamKey.APPLICATION: self._parse_params_obj.get_protect_object(),
            SaphanaRpcParamKey.TYPES: last_copy_type,
            SaphanaRpcParamKey.COPY_ID: self._parse_params_obj.get_copy_id()
        }
        return CommonFuction.exec_rc_tool_cmd(RpcToolInterface.QUERY_PREVIOUS_COPY, input_param,
                                              self._parse_params_obj.get_maintask_id())

    def _init_system_db_param(self):
        """Initialize system-database credentials.

        Builds the env-variable names holding the system DB and protected DB
        passwords, keyed by pid. For non-OTHER auth the system user/password
        are read from the parsed params, stored via add_env_param and then
        cleared from memory.

        :raises Exception: when the system user or password is missing
        """
        if self._systemdb_auth_type == AuthType.OTHER.value:
            self._system_db_pwd_env = f"{SaphanaJsonConstant.JOB}_{SaphanaJsonConstant.PROTECT_ENV}" \
                                      f"_{SaphanaJsonConstant.AUTH}_" \
                                      f"{SaphanaJsonConstant.EXTEND_INFO}_{SaphanaJsonConstant.KEY_INFO}_{self._pid}"
        else:
            self._system_db_user = self._parse_params_obj.get_system_db_user()
            system_db_pwd = self._parse_params_obj.get_system_db_pwd()
            if not self._system_db_user or not system_db_pwd:
                log.error("System user or password not exist")
                clear(system_db_pwd)
                raise Exception("System user or password not exist")
            self._system_db_pwd_env = f"{SaphanaJsonConstant.SYSTEM_DB_PASSWORD}_{self._pid}"
            add_env_param(self._system_db_pwd_env, system_db_pwd)
            # Wipe the plain-text password from memory once it is in the env.
            clear(system_db_pwd)
        if self._backupdb_auth_type == AuthType.OTHER.value:
            db_type = self._parse_params_obj.get_backup_db_type()
            # System DB credentials come from PROTECT_ENV; tenant DB credentials
            # come from PROTECT_OBJECT.
            if db_type == SaphanaConst.SYSTEM_DATABASE:
                self._backup_db_pwd_env = f"{SaphanaJsonConstant.JOB}_{SaphanaJsonConstant.PROTECT_ENV}_" \
                                      f"{SaphanaJsonConstant.AUTH}_" \
                                      f"{SaphanaJsonConstant.EXTEND_INFO}_{SaphanaJsonConstant.KEY_INFO}_{self._pid}"
            else:
                self._backup_db_pwd_env = f"{SaphanaJsonConstant.JOB}_{SaphanaJsonConstant.PROTECT_OBJECT}_" \
                    f"{SaphanaJsonConstant.AUTH}_" \
                    f"{SaphanaJsonConstant.EXTEND_INFO}_{SaphanaJsonConstant.KEY_INFO}_{self._pid}"

    def _upload_backup_progress(self):
        """Periodically report backup progress until the sub job leaves RUNNING.

        Builds a LogDetail matching the current job status, computes the speed
        in KB/s and reports through the RPC tool every
        self._query_progress_interval seconds.
        """
        register_ip = get_localhost_register_ip(self._parse_params_obj.get_all_agents_register_ip())
        while True:
            self._get_backup_progress()
            temp_job_status = self._job_status
            temp_logdetail = self._error_code if self._error_code else None
            if temp_job_status == SubJobStatusEnum.FAILED:
                log.error("Backup fail.")
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_FAIL_LABEL,
                                       logInfoParam=[f"{self._parse_params_obj.get_subtask_id()}"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.ERROR,
                                       logDetail=temp_logdetail,
                                       logDetailParam=self._log_detail_param)
            elif temp_job_status == SubJobStatusEnum.COMPLETED:
                log.info("Backup complete.")
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_SUCCESS_LABEL,
                                       logInfoParam=[f"{self._parse_params_obj.get_subtask_id()}",
                                                     f"{int(self._total_backup_size / 1024 / 1024)} MB"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.INFO)
                self._transferred_size = self._total_backup_size
            else:
                log_detail = LogDetail(logInfo=SaphanaTaskLabel.BACKUP_RUNNING_LABEL,
                                       logInfoParam=[register_ip, f"{self._parse_params_obj.get_subtask_id()}",
                                                     f"{int(self._transferred_size / 1024 / 1024)} MB"],
                                       logTimestamp=int(time.time()), logLevel=DBLogLevel.INFO)
            # Bug fix: guard against a zero or negative elapsed time, which
            # previously raised ZeroDivisionError when backup_end_time equaled
            # backup_start_time.
            elapsed = (self.backup_end_time or time.time()) - self.backup_start_time
            speed = (self._transferred_size / elapsed) / 1024 if elapsed > 0 else 0
            log.info(f"Backup running. {speed}")
            progress_dict = SubJobDetails(taskId=self._parse_params_obj.get_maintask_id(),
                                          subTaskId=self._parse_params_obj.get_subtask_id(),
                                          taskStatus=temp_job_status, progress=self.progress,
                                          dataSize=self._total_backup_size / 1024, logDetail=[log_detail],
                                          speed=speed)
            CommonFuction.report_job_details(self._parse_params_obj.get_maintask_id(),
                                             progress_dict.dict(by_alias=True))
            if temp_job_status != SubJobStatusEnum.RUNNING:
                log.info(f"Stop report progress thread. current job status is : {temp_job_status}.")
                break
            time.sleep(self._query_progress_interval)

    def _get_log_backup_path(self):
        # Log backups live under <log_backup_path>/<db-name-prefix><db-name>.
        db_dir = f"{self._saphana_cluster.db_name_prefix()}{self._backup_db_name}"
        return os.path.join(self._parse_params_obj.get_log_backup_path(), db_dir)

    def _get_catalog_id(self):
        """Query the backup id of the first catalog entry written after this backup.

        :return: the catalog backup id parsed from the query output, or 0 when
            self._backup_id is unset, the query fails, or no record is found
        """
        catalog_backup_id = 0
        if not self._backup_id:
            # Bug fix: original message contained a double negative
            # ("didn't not set value").
            log.error("After backup, the backup_id was not set.")
            return catalog_backup_id
        query_cmd = f"select top 1 backup_id from M_BACKUP_CATALOG_FILES where backup_id > {self._backup_id} " \
                    f"and source_type_name = 'catalog' order by entry_id asc"
        try:
            ret, output = self._saphana_cluster.execute_query_in_db(self._backup_db_name, self._backup_db_user,
                                                                    self._backup_db_pwd_env, query_cmd)
        except ErrCodeException as err:
            self._error_code = err.error_code
            log.error("Get catalog backup info failed.")
            return catalog_backup_id
        if not ret:
            log.error(f"Get catalog backup info failed for {output}")
            return catalog_backup_id
        # Consistency fix: reuse self._min_lines (set in __init__) instead of a
        # duplicated local constant. One command line, one header line and a
        # trailing blank line surround the data rows.
        output = output.split("\n")
        if len(output) <= self._min_lines:
            log.warning(f"No progress record. Output: {output}")
            return catalog_backup_id
        # output[2] is the first data row; strip its trailing characters.
        return self._saphana_cluster.erase_tail(output[2])
