#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
import operator
from concurrent.futures import ThreadPoolExecutor

from common.file_common import change_path_permission
from redis.common.common import format_capacity_convert, report_job_details

from redis import log
from common.common import output_result_file, execute_cmd
from common.common_models import SubJobDetails, LogDetail
from common.const import RepositoryDataTypeEnum, SubJobStatusEnum, CopyDataTypeEnum
from common.util.exec_utils import exec_cp_cmd, su_exec_rm_cmd

from redis.common.const import ProgressType, ErrorCode, NodeRole, RestoreProgressPhase, LogLevel
from redis.common.exception import CustomError
from redis.common.parse_param import get_repository_by_type, redis_parse_param_with_jsonschema
from redis.schemas.event_message import CommonBodyErrorResponse
from redis.schemas.redis_info_schema import MasterSlavePair
from redis.schemas.job_schema import SubJob
from redis.service.job_progress import JobProgress
from redis.service.cluster_res_service import ClusterResManager


class RedisRestore:
    """Restore a Redis cluster from a backup copy.

    Workflow: check prerequisites (identical slot layout between the protected
    and the target cluster), generate one sub job per agent, copy the copy's
    rdb files onto every local master/slave node, then clean up or roll back
    the temporary files depending on the result.
    """

    # Suffix appended to a node's existing rdb file before restore so the
    # original file can be rolled back if the restore fails (see clean_env).
    TMP_RDB_FILE_SUFFIX = '_bak'

    @staticmethod
    def allow_restore_in_local_node(pid, job_id, sub_job_id):
        """Answer the 'allow restore in local node' check with success.

        :param pid: request id used to locate the parameter/result files
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'pid: {pid}, job id: {job_id}, sub job id: {sub_job_id}, execute allow restore in local node')
        response = CommonBodyErrorResponse()
        output_result_file(pid, response.dict(by_alias=True))

    @staticmethod
    def allow_restore_sub_job_in_local_node(pid, job_id, sub_job_id):
        """Answer the 'allow restore sub job in local node' check with success.

        :param pid: request id used to locate the parameter/result files
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'pid: {pid}, job id: {job_id}, sub job id: {sub_job_id}, execute allow restore in sub job local node')
        response = CommonBodyErrorResponse()
        output_result_file(pid, response.dict(by_alias=True))

    @staticmethod
    def restore_prerequisite(pid, job_id, sub_job_id):
        """Check that the protected and target clusters share one slot layout.

        The COMPLETED/FAILED result is written to the progress cache and is
        later returned by the prerequisite progress query.
        """
        log.info(f'Step 1: pid: {pid}, job id: {job_id}, execute restore prerequisite task')
        file_content = redis_parse_param_with_jsonschema(pid)
        copy = file_content['job']['copies'][0]
        protect_env_nodes = copy['protectEnv']['nodes']
        target_env_nodes = file_content['job']['targetEnv']['nodes']
        progress_content = SubJobDetails(
            taskId=job_id, subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.COMPLETED.value, progress=100)
        if not RedisRestore.check_same_slot(protect_env_nodes, target_env_nodes):
            progress_content.task_status = SubJobStatusEnum.FAILED.value
        # Persist the result into the temporary progress cache.
        JobProgress.write_progress_to_cache(job_id, sub_job_id, progress_content.dict(by_alias=True),
                                            ProgressType.RESTORE_PREREQUISITE.value)

    @staticmethod
    def restore(pid, job_id, sub_job_id):
        """Run one restore sub job on the local agent.

        Verifies that every local redis node is stopped and has aof disabled,
        then distributes the rdb files and reports progress and the final
        result. On any failure the renamed rdb files are rolled back.
        """
        log.info(f'Step 3: pid: {pid}, job id: {job_id}, sub job id: {sub_job_id}, execute restore task')
        file_content = redis_parse_param_with_jsonschema(pid)
        nodes = file_content['job']['targetEnv']['nodes']
        sub_job = file_content['subJob']
        # Report progress: start checking whether the nodes allow a restore.
        progress_content = SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                         taskStatus=SubJobStatusEnum.RUNNING.value, dataSize=0,
                                         progress=RestoreProgressPhase.BEGIN_CHECK_NODE_ALLOW_RESTORE.value)
        report_job_details(pid, progress_content)
        local_nodes = RedisRestore.get_local_nodes(sub_job['jobName'], nodes)
        # Fail if a local redis instance is still running (the user must stop
        # it before restoring) or if aof persistence is enabled on any node.
        code = RedisRestore.check_nodes_allow_restore(local_nodes)
        if code != ErrorCode.SUCCESS.value:
            log.error(f'sub job id: {sub_job_id}, redis is running')
            # Report the failed progress.
            log_detail = LogDetail(logInfo="plugin_restore_subjob_fail_label",
                                   logInfoParam=[sub_job_id],
                                   logLevel=LogLevel.ERROR)
            progress_content = SubJobDetails(taskId=job_id, subTaskId=sub_job_id, logDetail=[log_detail],
                                             taskStatus=SubJobStatusEnum.FAILED.value, dataSize=0,
                                             progress=RestoreProgressPhase.COMPLETE.value)
            report_job_details(pid, progress_content)
            return
        # Report progress: node checks passed, copy restore starts.
        log_detail = LogDetail(logInfo="plugin_start_restore_copy_subjob_label", logInfoParam=[sub_job_id],
                               logLevel=LogLevel.INFO)
        progress_content = SubJobDetails(taskId=job_id, subTaskId=sub_job_id, logDetail=[log_detail],
                                         taskStatus=SubJobStatusEnum.RUNNING.value, dataSize=0,
                                         progress=RestoreProgressPhase.END_CHECK_NODE_ALLOW_RESTORE.value)
        report_job_details(pid, progress_content)
        try:
            RedisRestore.execute_restore(pid, job_id, sub_job_id, file_content)
        except Exception as err:
            log.error(f'job id: {job_id}, sub job id: {sub_job_id}, restore fail')
            log.error(err, exc_info=True)
            # Report progress: restore failed.
            log_detail = LogDetail(logInfo="plugin_restore_subjob_fail_label",
                                   logInfoParam=[sub_job_id],
                                   logLevel=LogLevel.ERROR)
            progress_content = SubJobDetails(taskId=job_id, subTaskId=sub_job_id, logDetail=[log_detail],
                                             taskStatus=SubJobStatusEnum.FAILED.value,
                                             progress=RestoreProgressPhase.COMPLETE.value)
            report_job_details(pid, progress_content)
            # Restore the renamed rdb files on failure.
            RedisRestore.clean_env(file_content, ErrorCode.ERROR_INTERNAL)
            return
        RedisRestore.clean_env(file_content, ErrorCode.SUCCESS)
        response = CommonBodyErrorResponse()
        output_result_file(pid, response.dict(by_alias=True))
        log.info(f'job id: {job_id}, sub job id: {sub_job_id}, restore success')

    @staticmethod
    def restore_post(pid, job_id, sub_job_id):
        """Record a COMPLETED post-restore progress in the progress cache."""
        log.info(f'job id: {job_id}, sub job id: {sub_job_id}, execute restore post task')
        # Write the progress into the temporary cache directory.
        progress_content = SubJobDetails(
            taskId=job_id, subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.COMPLETED.value, progress=100)
        JobProgress.write_progress_to_cache(job_id, sub_job_id, progress_content.dict(by_alias=True),
                                            ProgressType.RESTORE_POST.value)

    @staticmethod
    def get_local_nodes(agent_node_id, nodes):
        """Return the nodes whose agentId equals *agent_node_id*."""
        return [node for node in nodes
                if node['extendInfo']['agentId'] == agent_node_id]

    @staticmethod
    def check_nodes_allow_restore(nodes):
        """Check every local node can be restored.

        :return: ERR_EXIST_RUNNING_NODE if a redis process is still running,
                 ERR_AOF_ENABLE if aof persistence is on, otherwise SUCCESS.
        """
        for node in nodes:
            extend_info = node['extendInfo']
            result = ClusterResManager.get_redis_process_info(extend_info['ip'], extend_info['port'])
            if result:
                return ErrorCode.ERR_EXIST_RUNNING_NODE.value
            # Reject the restore when aof is enabled on this node.
            if int(extend_info['aofEnabled']) == 1:
                return ErrorCode.ERR_AOF_ENABLE.value
        return ErrorCode.SUCCESS.value

    @staticmethod
    def check_same_slot(src_nodes, dst_nodes):
        """Return True when both node lists cover exactly the same slots."""
        src_slots = sorted(node['extendInfo']['slot'] for node in src_nodes)
        dst_slots = sorted(node['extendInfo']['slot'] for node in dst_nodes)
        return src_slots == dst_slots

    @staticmethod
    def execute_restore(pid, job_id, sub_job_id, file_content):
        """Copy the rdb files of the copy onto every local target node.

        :raises CustomError: when copying any rdb file fails.
        """
        copy = file_content['job']['copies'][0]
        log.info(f'job id: {job_id}, sub job id: {sub_job_id}, execute restore')
        protect_env_nodes = copy['protectEnv']['nodes']
        target_env_nodes = file_content['job']['targetEnv']['nodes']
        rdb_parent_path = RedisRestore.get_rdb_parent_path(copy)
        log.info(f"job id: {job_id}, sub job id: {sub_job_id}, rdb parent path: {rdb_parent_path}")
        slot_pair, total = RedisRestore.build_target_slot_pair(file_content['subJob']['jobName'],
                                                               protect_env_nodes, target_env_nodes, rdb_parent_path)
        log.info(f"job id: {job_id}, sub job id: {sub_job_id}, build target slot pair size: {len(slot_pair)}")
        # Step 1: rename the rdb files already present on the target nodes so
        # they can be rolled back if the restore fails.
        log.info(f'job id: {job_id}, sub job id: {sub_job_id}, delete target nodes rdb files')
        RedisRestore.rename_target_nodes_rdb_file(slot_pair)
        rdb_total_size = RedisRestore.get_rdb_files_total_size(slot_pair)
        # Step 2: copy the rdb files to every node concurrently. The context
        # manager guarantees the pool is shut down even when an error occurs
        # (the original code leaked the executor).
        log.info(f'job id: {job_id}, sub job id: {sub_job_id}, start to copy rdb files')
        file_copy_thread_list = []
        with ThreadPoolExecutor(max_workers=4, thread_name_prefix='redis-restore') as pool:
            for pair in slot_pair:
                log.info(f"slot: {pair.slot}, nodes name: {list(map(lambda tmp_node: tmp_node['name'], pair.nodes))}")
                # Copy the rdb file to every node of this slot.
                for node in pair.nodes:
                    dst_file = f"{node['extendInfo']['dir']}/{node['extendInfo']['dbfilename']}"
                    log.info(f'start copy from {pair.rdb_path} to {dst_file}')
                    # Remember the owner of the destination directory so the
                    # copied file can be chowned to it afterwards.
                    stat_info = os.stat(node['extendInfo']['dir'])
                    cp_thread = pool.submit(exec_cp_cmd, pair.rdb_path, dst_file, None, '-f')
                    file_copy_thread_list.append((cp_thread, stat_info, dst_file))
            # Poll the copy tasks and report progress every 3 seconds.
            while True:
                time.sleep(3)
                RedisRestore.report_restore_progress_thread(pid, job_id, sub_job_id, slot_pair, rdb_total_size)
                is_done, result = RedisRestore.get_copy_thread_progress(file_copy_thread_list)
                if is_done:
                    if result != ErrorCode.SUCCESS:
                        raise CustomError(error_code=result, error_desc='copy rdb file failed')
                    break
        # Step 3: after a successful copy, give every target file the owner of
        # its destination directory.
        for copy_thread_info in file_copy_thread_list:
            t_copy_thread, t_stat_info, t_dst_file = copy_thread_info
            os.lchown(t_dst_file, t_stat_info.st_uid, t_stat_info.st_gid)
        # Step 4: report the completed progress.
        save_size, file_num = RedisRestore.get_save_files_size_num(slot_pair)
        log_detail = LogDetail(logInfo="plugin_restore_subjob_success_label",
                               logInfoParam=[sub_job_id, str(file_num), format_capacity_convert(rdb_total_size)],
                               logLevel=LogLevel.INFO)
        progress_content = SubJobDetails(taskId=job_id,
                                         subTaskId=sub_job_id,
                                         logDetail=[log_detail],
                                         taskStatus=SubJobStatusEnum.COMPLETED.value,
                                         progress=100,
                                         dataSize=int(rdb_total_size / 1024))
        report_job_details(pid, progress_content)

    @staticmethod
    def get_rdb_parent_path(copy):
        """Return the directory holding the copy's rdb files.

        Archive copies keep the copy id one level deeper in extendInfo.
        """
        data_repo_path = RedisRestore.get_copy_data_repo_path(copy['repositories'])
        copy_type = copy['type']
        # copy['type'] comes from the parsed job parameters, so compare it
        # against the enum values (the original compared against the raw enum
        # members, which only matches for str-based enums).
        if copy_type in (CopyDataTypeEnum.S3_ARCHIVE.value, CopyDataTypeEnum.TAP_ARCHIVE.value):
            rdb_parent_path = os.path.realpath(os.path.join(data_repo_path, copy['extendInfo']['extendInfo']['copyId']))
        else:
            rdb_parent_path = os.path.realpath(os.path.join(data_repo_path, copy['extendInfo']['copyId']))
        return rdb_parent_path

    @staticmethod
    def build_target_slot_pair(local_agent_id, protect_env_nodes, target_nodes, rdb_parent_path):
        """Build one MasterSlavePair per protected master node.

        Each pair records the rdb file to restore, the matching target master
        (same slot) and all local target nodes belonging to that master.

        :return: (list of pairs, number of local target nodes attached)
        """
        log.info(f'build target slot pair begin')
        slot_pairs = []
        for node in protect_env_nodes:
            extend_info = node['extendInfo']
            if int(extend_info['role']) != NodeRole.MASTER.value:
                continue
            rdb_path = RedisRestore.build_slot_pair_rdb_path(rdb_parent_path, protect_env_nodes, node)
            pair = MasterSlavePair(slot=extend_info['slot'], rdb_path=rdb_path)
            log.info(f'node: {node["id"]}, rdb path: {pair.rdb_path}, slot: {pair.slot}')
            # Find the target master that owns the same slot range.
            for target_node in target_nodes:
                if int(target_node['extendInfo']['role']) == NodeRole.MASTER.value and \
                        target_node['extendInfo']['slot'] == node['extendInfo']['slot']:
                    pair.master_id = target_node['id']
                    pair.host = f"{target_node['extendInfo']['ip']}:{target_node['extendInfo']['port']}"
                    break
            slot_pairs.append(pair)
        # Attach the local master/slave nodes of each slot to its pair.
        total = RedisRestore.add_nodes_info_in_slot_pair(slot_pairs, local_agent_id, target_nodes)
        return slot_pairs, total

    @staticmethod
    def add_nodes_info_in_slot_pair(slot_pairs, local_agent_id, target_nodes):
        """Attach the local target nodes of each slot pair; return their count."""
        total = 0
        for pair in slot_pairs:
            # A master and its slaves form one group for this slot.
            nodes = [node for node in target_nodes
                     if RedisRestore.check_local_same_pair(pair, node, local_agent_id)]
            total += len(nodes)
            pair.nodes = nodes
        return total

    @staticmethod
    def check_local_same_pair(pair, target_node, local_agent_id):
        """Return True when *target_node* is local and belongs to *pair*.

        A node belongs to the pair when it is the pair's master itself or a
        slave whose 'pair' field points at the master's host.
        """
        if local_agent_id != target_node['extendInfo']['agentId']:
            return False
        return target_node['id'] == pair.master_id or target_node['extendInfo']['pair'] == pair.host

    @staticmethod
    def build_slot_pair_rdb_path(rdb_parent_path, protect_env_nodes, node):
        """Return the rdb file path backing the protected master *node*.

        Prefers the rdb of a slave paired to the master, falls back to the
        master's own rdb, and returns '' when neither file exists.
        """
        host = f"{node['extendInfo']['ip']}:{node['extendInfo']['port']}"
        # First check whether a paired slave node has an rdb file.
        for protect_node in protect_env_nodes:
            if int(protect_node['extendInfo']['role']) == NodeRole.SLAVE.value and \
                    protect_node['extendInfo']['pair'] == host:
                rdb_path = os.path.join(rdb_parent_path, protect_node['id'] + '.rdb')
                if os.path.exists(rdb_path):
                    return rdb_path
        # No slave rdb found; check whether the master itself has one.
        rdb_path = os.path.realpath(os.path.join(rdb_parent_path, node['id'] + '.rdb'))
        if os.path.exists(rdb_path):
            return rdb_path
        return ''

    @staticmethod
    def restore_gen_sub_job(pid, job_id, sub_job_id):
        """Generate one restore sub job per distinct target agent.

        :return: False when no sub job could be generated, True otherwise.
        """
        log.info(f'Step 2: job id: {job_id}, sub job id: {sub_job_id}, redis gen restore sub jobs')
        sub_jobs = []
        file_content = redis_parse_param_with_jsonschema(pid)
        nodes = file_content['job']['targetEnv']['nodes']
        seen_agent_ids = set()
        for node in nodes:
            # Deduplicate: only one sub job per agent.
            agent_id = node['extendInfo']['agentId']
            if agent_id in seen_agent_ids:
                continue
            sub_job = SubJob(jobId=job_id, jobName=agent_id, execNodeId=agent_id)
            sub_jobs.append(sub_job.dict(by_alias=True))
            seen_agent_ids.add(agent_id)
        # No sub job generated: the caller exits with a non-zero code.
        if not sub_jobs:
            return False
        output_result_file(pid, sub_jobs)
        return True

    @staticmethod
    def get_copy_data_repo_path(repo):
        """Return the first path of the DATA repository, or '' when absent."""
        data_repo = get_repository_by_type(repo, RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        try:
            return data_repo['path'][0]
        except (KeyError, IndexError, TypeError):
            # Also guards an empty path list or a missing repository (None),
            # which the original KeyError-only handler let escape.
            log.error('get path key error')
            return ''

    @staticmethod
    def report_restore_progress_thread(pid, job_id, sub_job_id, slot_pair, rdb_total_size):
        """Report the rdb copy progress for the running sub job."""
        # Total size 0: nothing to copy, report completion of this phase.
        if rdb_total_size == 0:
            progress_content = SubJobDetails(taskId=job_id,
                                             subTaskId=sub_job_id,
                                             taskStatus=SubJobStatusEnum.RUNNING.value,
                                             progress=100,
                                             dataSize=0)
            report_job_details(pid, progress_content)
            return
        save_size, file_num = RedisRestore.get_save_files_size_num(slot_pair)
        # Progress formula: 10 + (save / total) * 85, spanning the phases
        # END_CHECK_NODE_ALLOW_RESTORE .. END_COPY_FILE.
        phase_begin = RestoreProgressPhase.END_CHECK_NODE_ALLOW_RESTORE.value
        phase_end = RestoreProgressPhase.END_COPY_FILE.value
        progress = phase_begin + (save_size / rdb_total_size) * (phase_end - phase_begin)
        progress_content = SubJobDetails(taskId=job_id,
                                         subTaskId=sub_job_id,
                                         taskStatus=SubJobStatusEnum.RUNNING.value,
                                         progress=int(progress),
                                         dataSize=int(save_size / 1024))
        report_job_details(pid, progress_content)
        log.info(f'report restore copy progress, save/total: {save_size}/{rdb_total_size}')

    @staticmethod
    def get_rdb_files_total_size(slot_pair):
        """Return the total bytes to copy: each rdb times its node count."""
        total = 0
        for pair in slot_pair:
            file_size = os.path.getsize(pair.rdb_path)
            total += file_size * len(pair.nodes)
            log.info(f'get total size, file: {pair.rdb_path}, size: {file_size}')
        return total

    @staticmethod
    def get_save_files_size_num(slot_pair):
        """Return (bytes already written, number of existing target files)."""
        log.info('start to get save files size')
        save_size = 0
        file_num = 0
        for pair in slot_pair:
            for node in pair.nodes:
                file = os.path.realpath(f"{node['extendInfo']['dir']}/{node['extendInfo']['dbfilename']}")
                if os.path.exists(file):
                    file_num += 1
                    file_size = os.path.getsize(file)
                    save_size += file_size
                    log.debug(f'save file: {file}, size: {file_size}')
        log.info('end to get save files size')
        return save_size, file_num

    @staticmethod
    def rename_target_nodes_rdb_file(slot_pair):
        """Rename existing target rdb files by appending TMP_RDB_FILE_SUFFIX."""
        for pair in slot_pair:
            for node in pair.nodes:
                file = os.path.realpath(f"{node['extendInfo']['dir']}/{node['extendInfo']['dbfilename']}")
                if os.path.exists(file):
                    tmp_file = file + RedisRestore.TMP_RDB_FILE_SUFFIX
                    os.rename(file, tmp_file)

    @staticmethod
    def get_copy_thread_progress(file_copy_thread_list):
        """Get the result of the file-copy futures.

        :param file_copy_thread_list: tuples of (future, source stat, target file)
        :return: (all futures finished, copy result as ErrorCode)
        """
        result = ErrorCode.SUCCESS
        for copy_thread_info in file_copy_thread_list:
            copy_thread, stat_info, dst_file = copy_thread_info
            if not copy_thread.done():
                return False, result
            thread_result = copy_thread.result()
            if not thread_result:
                log.error(f"copy to {dst_file} failed")
                result = ErrorCode.ERROR_INTERNAL
        return True, result

    @staticmethod
    def clean_env(file_content, restore_result):
        """Clean up after the restore on the local agent.

        On success the temporary (renamed) rdb files are deleted; on failure
        they are renamed back to their original names.

        :param file_content: parsed job parameters
        :param restore_result: ErrorCode result of the restore operation
        """
        target_env_nodes = file_content['job']['targetEnv']['nodes']
        local_agent_id = file_content['subJob']['jobName']
        for node in target_env_nodes:
            extend_info = node['extendInfo']
            if extend_info['agentId'] != local_agent_id:
                continue
            tmp_file = os.path.realpath(
                f"{extend_info['dir']}/{extend_info['dbfilename']}{RedisRestore.TMP_RDB_FILE_SUFFIX}")
            if not os.path.exists(tmp_file):
                continue
            if restore_result == ErrorCode.SUCCESS:
                su_exec_rm_cmd(tmp_file, check_white_black_list_flag=False)
            else:
                src_file = os.path.realpath(f"{extend_info['dir']}/{extend_info['dbfilename']}")
                os.rename(tmp_file, src_file)
