#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import os
import json
import time
import signal

from concurrent.futures import ThreadPoolExecutor
from redis import log
import psutil

from redis.common.common import format_capacity_convert, report_job_details
from redis.common.parse_param import redis_parse_param_with_jsonschema
from redis.schemas.redis_info_schema import MasterSlavePair

from common.common import output_result_file
from common.const import SubJobStatusEnum, SubJobPriorityEnum
from common.common_models import SubJobDetails, LogDetail
from common.util.exec_utils import exec_mkdir_cmd, exec_cp_cmd, su_exec_rm_cmd
from redis.common.const import ErrorCode, CommandReturnCode, NodeRole, BackupJobParam, ExtendInfo, ProgressType, \
    BackupRedisCmd, NodeStatus, BackupJobResult, LogLevel, ExecutePolicy
from redis.common.exception import CustomError
from redis.schemas.job_schema import SubJob
from redis.service.redis_client import RedisClient
from redis.service.cluster_res_service import ClusterResManager
from redis.service.job_progress import JobProgress
from redis.schemas.event_message import CommonBodyErrorResponse, PermissionInfo


class BackupTaskManager:
    """
    Backup task interfaces for a Redis protected environment.

    Covers the whole backup sub-job life cycle: permission/precondition
    checks, sub-job generation (one per master/slave pair), BGSAVE
    execution, copying the RDB file to the backup file system, progress
    reporting, abort handling and post-job cleanup.
    """

    def __init__(self, pid, job_id, sub_job_id):
        # Worker pool used to run blocking redis-cli / cp commands so the
        # main thread can poll and report progress while they execute.
        self.pool = ThreadPoolExecutor(max_workers=4, thread_name_prefix='redis-backup')
        self.pid = pid
        self.job_id = job_id
        self.sub_job_id = sub_job_id
        # redis-cli outputs that count as a successful BGSAVE trigger (a
        # save already running in the background also counts as success).
        self.exec_bgsave_success_msg = ["Background saving started", "ERR Background save already in progress"]

    @staticmethod
    def allow_backup_in_local_node(pid, job_id, sub_job_id):
        """Answer whether the backup job may run on this node (always OK)."""
        log.info(f'start to execute allow_backup_in_local_node, job_id:{job_id}, sub_job_id:{sub_job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            # Fix: logging kwarg is exc_info, not exec_info (which raises
            # TypeError inside the handler and masks the original error).
            log.error(err, exc_info=True)
        response = CommonBodyErrorResponse()
        output_result_file(pid, response.dict(by_alias=True))

    @staticmethod
    def allow_backup_sub_job_in_local_node(pid, job_id, sub_job_id):
        """Answer whether the backup sub job may run on this node (always OK)."""
        log.info(f'start to execute allow_backup_sub_job_in_local_node, job_id:{job_id}, sub_job_id:{sub_job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        response = CommonBodyErrorResponse()
        output_result_file(pid, response.dict(by_alias=True))

    @staticmethod
    def query_job_permission(pid, job_id):
        """Report the user/group permission info required by the job."""
        log.info(f'start to execute query_job_permission, job_id:{job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        output_result_file(pid, PermissionInfo().dict(by_alias=True))

    @staticmethod
    def backup_pre_job(pid, job_id):
        """Pre-job hook: validate the incoming parameters, then report OK."""
        log.info(f'start to execute backup_pre_job, job_id:{job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        output_result_file(pid, CommonBodyErrorResponse().dict(by_alias=True))

    @staticmethod
    def generate_sub_job(pid, job_id):
        """Split the backup job into one sub job per master/slave pair.

        Each pair contributes the node whose RDB will be saved (a slave when
        one exists, otherwise the lone master); a final 'queryCopy' sub job
        is appended to collect the copy information.

        Returns:
            True when sub jobs were generated and written out, False on any
            failure (the caller exits non-zero).
        """
        log.info(f'start to generate_sub_job, job_id:{job_id}')
        response = []
        try:
            protect_env = redis_parse_param_with_jsonschema(pid)[BackupJobParam.JOB][BackupJobParam.PROTECT_ENV]
            nodes = protect_env[BackupJobParam.NODES]
            log.info(f"instance num:{len(nodes)}")
            pairs = BackupTaskManager.build_master_slave_pair(nodes)
            for pair in pairs:
                log.info(f'generate sub job, cur pair: {pair.host}')
                if len(pair.nodes) == 0:
                    continue
                save_node = BackupTaskManager.get_save_rdb_node(pair.nodes)
                if save_node is None:
                    log.info(f'generate sub job, nodes len: {len(nodes)}, save node is none')
                    continue
                extend_info = save_node[BackupJobParam.EXTEND_INFO]
                agent_id = extend_info[ExtendInfo.AGENT_ID]
                # jobInfo tells the executing agent which instance to back up.
                job_info = {
                    "ip": extend_info[ExtendInfo.IP],
                    "port": extend_info[ExtendInfo.PORT]
                }
                response.append(
                    SubJob(jobId=job_id, execNodeId=agent_id, jobName=save_node[ExtendInfo.ID],
                           jobInfo=json.dumps(job_info)).dict(by_alias=True))
            # No sub job could be generated: treat as an error.
            if len(response) == 0:
                log.error(f'job id: {job_id}, generate zero sub job')
                raise Exception(f'job id: {job_id}, generate zero sub job')
            response.append(
                SubJob(jobId=job_id, policy=ExecutePolicy.ANY_NODE, jobName='queryCopy',
                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4).dict(by_alias=True))
            log.info(f'{response}')
        except Exception as err:
            log.error(err, exc_info=True)
            # Sub-job generation failed: exit non-zero.
            return False
        output_result_file(pid, response)
        return True

    @staticmethod
    def get_save_rdb_node(pair_nodes):
        """Pick the node whose RDB file will be saved for one pair.

        A lone master is backed up directly; when replicas exist, only one
        slave's RDB is copied so the master is not loaded by the backup.
        Returns None when a multi-node pair has no slave.
        """
        save_node = None
        if len(pair_nodes) == 1:
            save_node = pair_nodes[0]
        else:
            for node in pair_nodes:
                extend_info = node[BackupJobParam.EXTEND_INFO]
                # Fix: compare against the enum *value*, consistent with the
                # role check in build_master_slave_pair.
                if int(extend_info[ExtendInfo.ROLE]) == NodeRole.SLAVE.value:
                    save_node = node
                    break
        return save_node

    @staticmethod
    def build_master_slave_pair(nodes):
        """Group environment nodes into master/slave pairs.

        Each pair is keyed by the master's 'ip:port'; only ONLINE nodes that
        are the master itself or declare it in their 'pair' field attach.
        """
        pairs = []
        # First pass: every master anchors one pair.
        for node in nodes:
            extend_info = node['extendInfo']
            if int(extend_info['role']) != NodeRole.MASTER.value:
                continue
            pair = MasterSlavePair(host=f'{extend_info["ip"]}:{extend_info["port"]}')
            pairs.append(pair)
        # Second pass: attach every online node belonging to each pair.
        for pair in pairs:
            pair_nodes = []
            for node in nodes:
                extend_info = node['extendInfo']
                # NOTE(review): NodeStatus.ONLINE is compared to an int
                # without .value; this only works if NodeStatus is an
                # IntEnum — confirm.
                if (pair.host == f'{extend_info["ip"]}:{extend_info["port"]}' or
                    pair.host == extend_info['pair']) and \
                        int(extend_info[ExtendInfo.STATUS]) == NodeStatus.ONLINE:
                    pair_nodes.append(node)
            pair.nodes = pair_nodes
        return pairs

    @staticmethod
    def check_backup_job_type(pid, job_id):
        """Validate the backup job type request, then report OK."""
        log.info(f'start to check_backup_job_type, job_id:{job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        output_result_file(pid, CommonBodyErrorResponse().dict(by_alias=True))

    @staticmethod
    def backup_post_job(pid, job_id):
        """Post-job hook: on a failed backup, delete the copy directory."""
        log.info(f'start to backup_post_job, job_id:{job_id}')
        job_param = redis_parse_param_with_jsonschema(pid)
        backup_job_result = job_param[BackupJobParam.BACKUP_JOB_RESULT]
        log.info(f'job_status: {backup_job_result}')
        # NOTE(review): BackupJobResult.SUCCESS compared without .value —
        # works only for IntEnum; confirm against the const definition.
        if int(backup_job_result) != BackupJobResult.SUCCESS:
            BackupTaskManager.clear_copy_data(job_param)
        output_result_file(pid, CommonBodyErrorResponse().dict(by_alias=True))

    @staticmethod
    def abort_job(pid, job_id, sub_job_id):
        """Abort a running sub job by killing its process, then report OK."""
        log.info(f'start to abort_job, job_id:{job_id},sub_job_id:{sub_job_id}')
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        BackupTaskManager.kill_sub_job(sub_job_id)
        output_result_file(pid, CommonBodyErrorResponse().dict(by_alias=True))

    @staticmethod
    def clear_copy_data(job_param):
        """Remove this copy's directory from the data repository."""
        # Mount path of the data repository file system.
        mount_path = BackupTaskManager.query_mount_path(
            job_param[BackupJobParam.JOB][BackupJobParam.REPOSITORIES])
        # The copy id names the directory created for this backup.
        copy_id = job_param[BackupJobParam.JOB][BackupJobParam.COPY][0][ExtendInfo.ID]
        copy_data_path = f'{mount_path}/{copy_id}'
        log.info(f'copy_data_path:{copy_data_path}')
        if os.path.exists(copy_data_path):
            su_exec_rm_cmd(copy_data_path, check_white_black_list_flag=False)
            log.info(f"copy_data_path:{copy_data_path} has deleted")

    @staticmethod
    def kill_sub_job(sub_job_id):
        """SIGKILL the process whose command line carries this sub job id.

        NOTE(review): assumes the plugin launches sub jobs with argv[2] set
        to 'Backup'/'Restore' and argv[5] set to the sub job id — confirm
        against the launcher.
        """
        if sub_job_id is None or len(sub_job_id) == 0:
            return
        processes = psutil.process_iter()
        for process in processes:
            try:
                cmd_lines = process.cmdline()
                if len(cmd_lines) < 6:
                    continue
                if str(sub_job_id) == str(cmd_lines[5]) and \
                        ("Backup" == str(cmd_lines[2]) or "Restore" == str(cmd_lines[2])):
                    log.info(f'kill backup process, pid: {process.pid}')
                    os.kill(process.pid, signal.SIGKILL)
                    return
            except Exception as err:
                # Best effort: processes may vanish or deny access while we
                # scan; log, skip and keep looking.
                log.error(err, exc_info=True)
                continue

    @staticmethod
    def backup_common_progress(pid, job_id, sub_job_id):
        """Report an immediately-completed progress for trivial sub jobs."""
        log.info(f"start to query process, pid:{pid}, job_id:{job_id}")
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        progress = SubJobDetails(taskId=job_id, subTaskId=sub_job_id, progress=100,
                                 taskStatus=SubJobStatusEnum.COMPLETED.value)
        output_result_file(pid, progress.dict(by_alias=True))

    @staticmethod
    def get_master_instance_by_slave(nodes, pair):
        """
        Resolve the master instance (and its agent id) from a slave's pair
        string ('masterIp:port,...' — the first entry is the master).

        Raises:
            Exception: when no matching ONLINE master exists in the nodes.
        """
        log.info(f'pair:{pair}')
        master_instance = str(pair).split(',')[0]
        instance_ip = master_instance.split(':')[0]
        for node in nodes:
            extend_info = node[BackupJobParam.EXTEND_INFO]
            # Fix: compare against the enum value, consistent with
            # build_master_slave_pair. NOTE(review): NodeStatus.ONLINE is
            # still compared without .value — confirm it is an IntEnum.
            if (int(extend_info[ExtendInfo.ROLE]) == NodeRole.MASTER.value
                    and extend_info[ExtendInfo.IP] == instance_ip
                    and int(extend_info[ExtendInfo.STATUS]) == NodeStatus.ONLINE):
                return extend_info[ExtendInfo.AGENT_ID], node
        raise Exception("the cluster status abnormal")

    @staticmethod
    def backup_process(pid, job_id, sub_job_id):
        """Report the cached backup progress for a running sub job."""
        log.info(f"start to query backup process, pid:{pid}, job_id:{job_id},sub_job_id:{sub_job_id}")
        try:
            redis_parse_param_with_jsonschema(pid)
        except Exception as err:
            log.error(err, exc_info=True)
        JobProgress.get_progress(pid, job_id, sub_job_id, ProgressType.BACKUP)

    @staticmethod
    def query_backup_copy(pid, job_id, sub_job_id):
        """Assemble the copy info: repositories with the copy id appended to
        the data repository's remote path, plus the copy id itself."""
        log.info(f"start to query backup copy, pid:{pid}, job_id:{job_id},sub_job_id:{sub_job_id}")
        copy_info = {}
        try:
            job_param = redis_parse_param_with_jsonschema(pid)[BackupJobParam.JOB]
            copy_id = job_param[BackupJobParam.COPY][0][ExtendInfo.ID]
            repositories = job_param[BackupJobParam.REPOSITORIES]
            for repository in repositories:
                # Repository type 1 identifies the data repository (same
                # constant used in query_mount_path).
                if int(repository[BackupJobParam.REPOSITORY_TYPE]) == 1:
                    repository[BackupJobParam.REMOTE_PATH] = f"{repository[BackupJobParam.REMOTE_PATH]}/{copy_id}"
            copy_info[BackupJobParam.REPOSITORIES.value] = repositories
            copy_info[BackupJobParam.EXTEND_INFO.value] = {ExtendInfo.COPY_ID.value: copy_id}
            log.info(f'copy_info:{copy_info}')
        except Exception as err:
            log.error(err, exc_info=True)
        output_result_file(pid, copy_info)

    @staticmethod
    def report_process(pid, total_size, extend_info, target_file, process: SubJobDetails):
        """Report copy progress from the target file size vs. source size.

        total_size is the source RDB size in bytes. NOTE(review): data_size
        is first set to bytes, then overwritten with target size in KB once
        the target exists — confirm the expected unit with the framework.
        """
        process.data_size = total_size
        source_file = f"{extend_info[ExtendInfo.DIR]}/{extend_info[ExtendInfo.DBFILENAME]}"
        # Before the copy created the target (or if the source vanished),
        # just report the current state unchanged.
        if not os.path.exists(target_file) or not os.path.exists(source_file):
            report_job_details(pid, process)
            return
        save_size = os.stat(target_file).st_size
        log.info(f'save_size:{save_size}, total_size:{total_size}, source_file:{source_file}')
        process.progress = 100 if total_size == 0 else int(save_size / total_size * 100)
        process.data_size = int(save_size / 1024)
        # Merged the two identical `progress == 100` branches of the
        # original: completion sets both the status and the success label.
        if process.progress == 100:
            process.task_status = SubJobStatusEnum.COMPLETED
            log_detail = LogDetail(logInfo="plugin_backup_subjob_success_label",
                                   logInfoParam=[process.sub_task_id, "1", format_capacity_convert(total_size)],
                                   logLevel=LogLevel.INFO)
            process.log_detail = [log_detail]
        report_job_details(pid, process)

    @staticmethod
    def write_backup_progress_to_cache(job_id, sub_job_id, process):
        """Persist the current backup progress for later polling."""
        JobProgress.write_progress_to_cache(job_id, sub_job_id, process.dict(by_alias=True), ProgressType.BACKUP)

    @staticmethod
    def query_instance_detail(nodes: list, job_param: dict):
        """
        Locate the instance this sub job targets (matched by ip and port
        from the sub job's jobInfo).

        Returns:
            (node, index) — the node dict and its index inside nodes.

        Raises:
            CustomError: when no node matches the sub job info.
        """
        sub_job = job_param.get(BackupJobParam.SUBJOB)
        sub_job_info = json.loads(sub_job[BackupJobParam.JOB_INFO])
        log.info(f'job_info:{sub_job_info}')
        for index, node in enumerate(nodes):
            log.info(f'start to find node')
            extend_info = node[BackupJobParam.EXTEND_INFO]
            if extend_info[ExtendInfo.IP] == sub_job_info['ip'] and \
                    extend_info[ExtendInfo.PORT] == sub_job_info['port']:
                return node, index
        raise CustomError(error_desc='no found node which to be backup')

    @staticmethod
    def query_mount_path(repositories: list):
        """
        Return the mount path of the data repository (type 1), or '' when
        no data repository is present.
        """
        for repository in repositories:
            if int(repository[BackupJobParam.REPOSITORY_TYPE]) == 1:
                return repository['path'][0]
        return ''

    @staticmethod
    def mkdir_by_copy_id(mount_path, copy_id):
        """
        Create (if needed) and return the per-copy directory under the data
        repository mount path.

        Raises:
            CustomError: when the mount path itself does not exist.
        """
        log.info(f'copy_id: {copy_id}')
        if not os.path.exists(mount_path):
            log.error('mount path does not exist')
            raise CustomError(error_desc='mount path does not exist')
        target_path = f'{mount_path}/{copy_id}'
        if os.path.exists(target_path):
            log.info('mount path has exist')
            return target_path
        exec_mkdir_cmd(target_path)
        return target_path

    @staticmethod
    def deal_error(error: Exception, process: SubJobDetails):
        """Mark the sub job failed and attach an error log detail.

        CustomError carries its own code/parameters; anything else maps to
        the generic internal error code.
        """
        log.error(error, exc_info=True)
        process.progress = 100
        process.task_status = SubJobStatusEnum.FAILED
        log_detail = LogDetail(logInfo="plugin_backup_subjob_fail_label", logInfoParam=[process.sub_task_id],
                               logLevel=LogLevel.ERROR)
        # Fix: proper isinstance check instead of comparing class names.
        if isinstance(error, CustomError):
            log_detail.log_detail = error.code
            log_detail.log_detail_param = error.error_parameter
        else:
            log_detail.log_detail = ErrorCode.ERROR_INTERNAL
        process.log_detail = [log_detail]
        return process

    @staticmethod
    def generate_param(pid):
        """
        Build the parameters the backup sub job needs: redis credentials,
        the node's dbfilename/dir queried live from redis, and the target
        RDB path inside the copy directory.
        """
        job_param = redis_parse_param_with_jsonschema(pid)
        nodes = job_param[BackupJobParam.JOB][BackupJobParam.PROTECT_ENV][BackupJobParam.NODES]
        job_detail, node_index_in_env = BackupTaskManager.query_instance_detail(nodes, job_param)
        env_name = f'job_protectEnv_nodes_{node_index_in_env}_auth'
        log.info(f'env_name:{env_name}')
        # Credentials live in environment variables; fetch them by env name.
        auth_obj, _ = ClusterResManager.pre_auth_job(pid, job_detail[BackupJobParam.AUTH], env_name)
        extend_info = job_detail[BackupJobParam.EXTEND_INFO]
        user, cmd_path, _ = ClusterResManager.get_redis_process_info(extend_info[ExtendInfo.IP],
                                                                     extend_info[ExtendInfo.PORT])
        log.info(f'user:{user}, cmd_path: {cmd_path}')
        extend_info[ExtendInfo.ID] = job_detail[BackupJobParam.ID]
        extend_info[ExtendInfo.DBFILENAME] = RedisClient.query_redis_node_dbfilename(user, cmd_path, auth_obj,
                                                                                     extend_info)
        extend_info[ExtendInfo.DIR] = RedisClient.query_redis_node_dir(user, cmd_path, auth_obj, extend_info)
        # Mount path of the data repository file system.
        mount_path = BackupTaskManager.query_mount_path(
            job_param[BackupJobParam.JOB][BackupJobParam.REPOSITORIES])
        # The copy id names the directory that receives the RDB file.
        copy_id = job_param[BackupJobParam.JOB][BackupJobParam.COPY][0][ExtendInfo.ID]
        copy_path = BackupTaskManager.mkdir_by_copy_id(mount_path, copy_id)
        target_file = f"{copy_path}/{extend_info[ExtendInfo.ID]}.rdb"
        log.info(f'copy_path:{copy_path}')
        back_param = {
            'user': user, 'cmdPath': cmd_path, 'auth': auth_obj, 'extendInfo': extend_info, 'targetFile': target_file
        }
        return back_param

    def backup(self):
        """Run the backup sub job: report start, BGSAVE, then copy the RDB
        file to the backup file system.

        NOTE(review): the success response file is written even after an
        exception — the failure itself is conveyed via report_job_details;
        confirm this matches the framework contract.
        """
        process = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_job_id, progress=0,
                                taskStatus=SubJobStatusEnum.RUNNING)
        try:
            log.info(f'start to execute backup, job_id:{self.job_id},sub_job_id:{self.sub_job_id},pid:{self.pid}')
            log_detail = LogDetail(logInfo="plugin_start_backup_copy_subjob_label", logInfoParam=[process.sub_task_id],
                                   logLevel=LogLevel.INFO)
            process.log_detail = [log_detail]
            report_job_details(self.pid, process)
            back_param = BackupTaskManager.generate_param(self.pid)
            # Step 1: persist memory to the RDB file via bgsave.
            self.execute_bgsave(back_param['user'], back_param['cmdPath'], back_param['auth'], back_param['extendInfo'])
            # Step 2: copy the RDB file to the target file system.
            self.copy_rdb_to_file_system(back_param['extendInfo'], back_param['targetFile'], process)
        except Exception as err:
            process = BackupTaskManager.deal_error(err, process)
            report_job_details(self.pid, process)
        response = CommonBodyErrorResponse()
        output_result_file(self.pid, response.dict(by_alias=True))

    def execute_bgsave(self, user, cmd_path, auth, extend_info):
        """
        Persist the in-memory data set to the RDB file.

        Records LASTSAVE before triggering BGSAVE so the two timestamps can
        be compared to confirm a new dump happened.

        Raises:
            CustomError: when either LASTSAVE or the BGSAVE trigger fails.
        """
        log.info("start to excute bgsave")
        code, lastsave_time, err = RedisClient.execute_redis_cmd(user, cmd_path, auth, extend_info,
                                                                 BackupRedisCmd.LASTSAVE)
        if int(code) != CommandReturnCode.SUCCESS.value:
            log.error(f'execute redis-cli lastsave failed, message: {lastsave_time}, err: {err}')
            raise CustomError(error_desc=err, error_code=ErrorCode.ERROR_NETWORK_CONNECT_TIMEOUT)
        log.info(f'last save time is {lastsave_time}')
        code, out, err = RedisClient.execute_redis_cmd(user, cmd_path, auth, extend_info, BackupRedisCmd.BGSAVE)
        # Fix: compare against the enum value, consistent with the check above.
        if int(code) != CommandReturnCode.SUCCESS.value or str(out).strip() not in self.exec_bgsave_success_msg:
            log.error(f'execute redis-cli bgsave failed, code:{code}, message: {out}, err: {err}')
            raise CustomError(error_desc=err)
        # Fix: this logs the BGSAVE result, not lastsave.
        log.info(f'execute redis-cli bgsave success: {out}')
        lastsave_future = self.pool.submit(RedisClient.execute_redis_cmd, user, cmd_path, auth, extend_info,
                                           BackupRedisCmd.LASTSAVE)
        # result() blocks until completion; no busy-wait needed.
        last_code, cur_save_time, last_err = lastsave_future.result()
        log.info(f'result: {lastsave_future.result()}')
        if int(last_code) != CommandReturnCode.SUCCESS.value:
            # Fix: report the error of THIS lastsave call instead of the
            # stale `err` left over from the bgsave command.
            raise CustomError(error_code=ErrorCode.ERROR_NETWORK_CONNECT_TIMEOUT, error_desc=last_err)
        log.info(f'diff:{int(cur_save_time) - int(lastsave_time)}')
        log.info("execute bgsave finished")

    def copy_rdb_to_file_system(self, extend_info, target_file, process):
        """Copy the RDB dump to the backup file system, reporting progress
        every two seconds while the copy runs.

        Raises:
            CustomError: when the copy command reports failure.
        """
        log.info(f'start to copy rdb to filesystem')
        # Clear the previous log details before the new reporting cycle.
        process.log_detail = None
        source_file = f'{extend_info[ExtendInfo.DIR]}/{extend_info[ExtendInfo.DBFILENAME]}'
        log.info(f'start copy from {source_file} to {target_file}')
        total_size = os.stat(source_file).st_size if os.path.exists(source_file) else 0
        log.info(f'need copy file total size: {total_size}')
        copy_future = self.pool.submit(exec_cp_cmd, source_file, target_file, None, '-f')
        while not copy_future.done():
            time.sleep(2)
            BackupTaskManager.report_process(self.pid, total_size, extend_info, target_file, process)
        is_success = copy_future.result()
        log.info(f'result:{is_success}')
        if not is_success:
            log.error(f'execute copy from {source_file} to {target_file} failed')
            raise CustomError(error_code=ErrorCode.ERROR_INTERNAL, error_desc='copy file failed')
        process.task_status = SubJobStatusEnum.COMPLETED
        process.progress = 100
        log_detail = LogDetail(logInfo="plugin_backup_subjob_success_label",
                               logInfoParam=[process.sub_task_id, "1", format_capacity_convert(total_size)],
                               logLevel=LogLevel.INFO)
        process.log_detail = [log_detail]
        BackupTaskManager.report_process(self.pid, total_size, extend_info, target_file, process)
