#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import math
import os
import time
import stat

from common.common_models import SubJobDetails, LogDetail
from common.const import RepositoryDataTypeEnum, SubJobStatusEnum, SubJobPriorityEnum, RpcParamKey
from common.common import execute_cmd, exec_rpc_tool_cmd, invoke_rpc_tool_interface

from ndmp.comm.log import log
from ndmp.comm.share_resource_manager import ShareResourceManager
from ndmp.comm.utils import report_job_details, format_capacity, remove_file
from ndmp.comm.const import SubJobType, NdmpLabel, LogLevel, NdmpCopyStatus, NdmpFileSystemPrefix, CopyFormat, Constant
from ndmp.schemas.ndmp_schemas import ResultInfo, SubJob, RestoreTaskStats
from ndmp.service.client.ndmp_client import NdmpClientInterfaceS
from ndmp.service.ndmp_service_base import NDMPServiceBase, LastCopyType
from ndmp.service.restore.parse_restore_param import RestoreParam


class NDMPRestoreService(NDMPServiceBase):
    """NDMP restore service.

    Drives the full restore flow for one protection job: node pre-check,
    prerequisite job, sub-job generation, per-copy restore through the NDMP
    client, progress/statistics aggregation via a shared cache file, and the
    final success/failure reporting, plus the post job.
    """

    def __init__(self, req_id: str, job_id: str, sub_id: str):
        super().__init__(req_id, job_id, sub_id)
        self.param = RestoreParam(self.req_id)
        # Bytes of the current copy already folded into the shared stats file.
        self.last_process_size = 0
        self.process_log_info = ""
        self.task_status = SubJobStatusEnum.RUNNING.value
        self.error_code = NdmpCopyStatus.INTERNAL_ERROR
        # Total bytes restored by this sub job across finished copies.
        self.restore_size = 0
        self.files_cnt = 0
        self.start_time = 0
        # Fix: initialize speed here. report_sub_job_success() reads self.speed,
        # but report_running_status() (which normally sets it) never runs when
        # every copy in the chain is empty — previously an AttributeError.
        self.speed = 0
        self.restore_sub_dir_list: list = self.param.get_sub_dir()
        self.last_report_time = int(time.time())
        self.copy_format = self.param.get_copies()[0].get("format", 1)
        self.label_dict = self.init_label_dict()
        self.cache_path = self.param.get_cache_path()
        self.level = self.param.get_restore_level()

    def init_label_dict(self):
        """Extend the base label map with the restore-failure label."""
        label_dict = super().init_label_dict()
        label_dict.update({NdmpCopyStatus.INTERNAL_ERROR: {"label": NdmpLabel.EXECUTE_RESTORE_FAILED_LABEL,
                                                           "param": [self.sub_id]}})
        return label_dict

    def allow_restore_in_local_node(self):
        """Check whether the local node may execute the restore (always allowed)."""
        log.info(f"allow_restore_in_local_node success, job id:{self.job_id}.")
        response = ResultInfo()
        self.report_result(response.dict(by_alias=True))
        return True

    def restore_prerequisite(self):
        """Restore prerequisite task (no-op, reports success)."""
        log.info(f"restore_prerequisite success, job id:{self.job_id}.")
        response = ResultInfo()
        self.report_result(response.dict(by_alias=True))
        return True

    def restore_prerequisite_progress(self):
        """Report the prerequisite task as 100% complete."""
        # Fixed log message: previously said "backup_prerequisite_progress".
        log.info(f"restore_prerequisite_progress success, job id:{self.job_id}.")
        progress = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                 taskStatus=SubJobStatusEnum.COMPLETED.value)
        self.report_result(progress.dict(by_alias=True))
        return True

    def restore_gen_sub_job(self):
        """Generate the business sub job plus the final statistics-report sub job."""
        log.info(f"restore_gen_sub_job start, job id:{self.job_id}.")
        response = [
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName=str(self.job_id),
                   ignoreFailed=False).dict(by_alias=True),
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName="ReportNdmpRestoreInfo",
                   jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4).dict(by_alias=True)
        ]
        self.report_result(response)
        log.info(f"restore_gen_sub_job end, job id:{self.job_id}.")
        return True

    def generate_sub_task(self):
        """Split restore paths into batches of 10, one priority level per batch.

        All paths in the same batch share a priority so they can run in
        parallel; the statistics-report job gets the lowest priority so it
        runs after every batch.
        """
        log.info(f"generate_sub_task start, job id:{self.job_id}.")
        batch_size = 10
        path_count = len(self.restore_sub_dir_list)
        response = []
        for start in range(0, path_count, batch_size):
            priority = start // batch_size + 1
            for path in self.restore_sub_dir_list[start:start + batch_size]:
                log.info(f"Add new restore sub job with path: {path}")
                sub_job = SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                                 jobName=str(self.job_id), jobPriority=priority, ignoreFailed=False,
                                 jobInfo=path).dict(by_alias=True)
                response.append(sub_job)
        # Priority just below the last batch (equals 1 when there are no paths).
        final_priority = (path_count + batch_size - 1) // batch_size + 1
        # Sub job that reports the aggregated restore statistics.
        response.append(
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName="ReportNdmpRestoreInfo",
                   jobPriority=final_priority).dict(by_alias=True)
        )

        # Framework limitation: generated sub jobs must be reported in one shot.
        self.report_result(response)

    def format_restore_info(self, copy_path):
        """Build the NdmpClientInterfaceS struct for one copy.

        :param copy_path: copy id used as the sub-directory under the data root.
        :return: NdmpClientInterfaceS with all string fields encoded to bytes.
        """
        # NOTE(review): the source credentials come from get_dst_auth()/get_dst_pwd()
        # as well — presumably both ends share one credential; confirm with the
        # parameter parser before changing.
        s_authkey, s_authpwd, s_ip = self.param.get_dst_auth(), self.param.get_dst_pwd(), self.param.get_src_ip()
        r_authkey, r_authpwd, r_ip = self.param.get_dst_auth(), self.param.get_dst_pwd(), self.param.get_dst_ip()
        level = str(self.level)
        r_path = self.param.get_dst_path()
        exclude = ""
        s_path = self.param.get_src_path()
        s_port = self.param.get_port()
        back_file_path = os.path.join(self.param.get_data_root_path(), copy_path)
        if self.level == 1:
            # File-level restore: the "level" slot carries the file list instead.
            level = self.param.get_restore_files()
        log.info(f"format restore info, src_path: {s_path}, r_path: {r_path}, level: {self.level}")
        return NdmpClientInterfaceS(s_authkey.encode(), r_authkey.encode(), s_ip.encode(), r_ip.encode(),
                                    level.encode(), s_authpwd.encode(), r_authpwd.encode(), s_path.encode(),
                                    r_path.encode(), back_file_path.encode(), exclude.encode(), self.copy_format,
                                    s_port)

    def get_last_full_copy_info(self, job_id):
        """Query the previous full copy via the RPC tool; {} on failure."""
        input_param = {
            RpcParamKey.APPLICATION: self.param.get_protect_object(),
            RpcParamKey.TYPES: LastCopyType.last_copy_type_dict.get(2),
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: job_id
        }
        try:
            result = invoke_rpc_tool_interface(job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, input_param)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}
        return result

    def get_last_full_copy_id(self, job_id):
        """Return the id of the last full copy, or None when the query failed."""
        last_copy_info = self.get_last_full_copy_info(job_id)
        return last_copy_info.get("id")

    def is_copy_file_empty(self, copy):
        """True when the copy's backup file is missing or has size 0."""
        file_size = 0
        copy_id = self.get_copy_file_id(copy)
        copy_file = os.path.join(self.param.get_data_root_path(), copy_id, "fs_full_back.file")
        if os.path.exists(copy_file):
            file_size = os.path.getsize(copy_file)
        return file_size == 0

    def restore_inner(self, restore_copies):
        """Restore each copy in order; stop and report failure on the first error."""
        for restore_copy in restore_copies:
            # An empty copy needs no restore — treat it as an immediate success.
            if self.is_copy_file_empty(restore_copy):
                continue
            self.restore_one_copy(restore_copy)
            self.ndmp_client.destroy()  # stop the client monitor thread
            if self.task_status != SubJobStatusEnum.COMPLETED.value:
                log.error(f"job_id: {self.job_id}, sub_job: {self.sub_id} restore failed")
                copy_id = restore_copy.get("extendInfo").get("copy_id")
                ret_msg = self.read_log_message_file_for_restore_failure(copy_id)
                if ret_msg:
                    self.report_job_failed_msg(ret_msg)
                self.report_job_failed()
                return
        self.report_sub_job_success()

    def read_log_message_file_for_restore_failure(self, copy_path):
        """Collect unique Warning/Error lines from the copy's RESTORE_LOG_MESSAGE file.

        :param copy_path: copy id under the data root.
        :return: concatenated distinct error lines, "" when none or unreadable.
        """
        log_msg_file = os.path.join(self.param.get_data_root_path(), f"{copy_path}", "RESTORE_LOG_MESSAGE")
        err_msg_set = set()
        ret_msg = ""
        try:
            flags = os.O_RDONLY
            modes = stat.S_IRUSR
            with os.fdopen(os.open(log_msg_file, flags, modes), 'r') as f:
                lines = f.readlines()
            for line in lines:
                if ("Warning:" in line or "Error:" in line or
                    "ERROR:" in line):
                    err_msg_set.add(line)
            for msg in err_msg_set:
                ret_msg += msg
        except Exception as e:
            # Best effort: missing/unreadable log file just yields no message.
            log.error(e, exc_info=True)
        return ret_msg

    def report_job_failed_msg(self, msg):
        """Report the sub job as failed with the detailed message from the NDMP log."""
        log.error(f"Report sub job detailed, job id: {self.job_id}, sub_job: {self.sub_id}")
        log_detail = LogDetail(logInfo=NdmpLabel.NDMP_PLUGIN_RESTORE_PROCESS_MSG_LABEL,
                               logInfoParam=[self.sub_id, msg],
                               logLevel=LogLevel.ERROR.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100, logDetail=[log_detail],
                                        taskStatus=SubJobStatusEnum.FAILED.value).dict(by_alias=True)
        report_job_details(self.req_id, self.job_id, sub_job_details)

    def report_job_failed(self):
        """Report the sub job as failed using the label mapped from the error code."""
        log.error(f"Restore failed, job id: {self.job_id}, error code: {self.error_code}")
        # Look up the label and its parameters for this error code.
        label = self.label_dict.get(self.error_code, {}).get("label", NdmpLabel.EXECUTE_BACKUP_FAILED_LABEL)
        param = self.label_dict.get(self.error_code, {}).get("param", None)
        log_detail = LogDetail(logInfo=label, logInfoParam=param, logLevel=LogLevel.ERROR.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100, logDetail=[log_detail],
                                        taskStatus=SubJobStatusEnum.FAILED.value).dict(by_alias=True)
        report_job_details(self.req_id, self.job_id, sub_job_details)

    def report_job_success(self):
        """Aggregate the shared statistics file and report overall success.

        Runs in the final "ReportNdmpRestoreInfo" sub job; fails the job when
        the statistics file is empty (no sub job ever wrote statistics).
        """
        restore_status_file_path = os.path.join(self.cache_path, Constant.RESTORE_STATUS_FILE)
        log.info(f"report_job_success, restore_status_file {restore_status_file_path}")
        share_resource_manager = ShareResourceManager(restore_status_file_path, "r")
        json_str = share_resource_manager.read()
        share_resource_manager.close()
        if not json_str:
            self.report_job_failed()
            return

        task_status = json.loads(json_str)
        total_size = task_status["restoreSize"]
        str_size = format_capacity(total_size)
        duration = int(time.time()) - task_status["startTime"]
        # Speed in KB/s; guard against a zero duration.
        speed = (total_size / duration) // 1024 if duration != 0 else 0
        log_detail = LogDetail(logInfo=NdmpLabel.EXECUTE_RESTORE_SUCCESS_LABEL,
                               logInfoParam=[str_size],
                               logLevel=LogLevel.INFO.value)

        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100, logDetail=[log_detail],
                                        dataSize=math.ceil(total_size // 1024), speed=speed,
                                        taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True)
        report_job_details(self.req_id, self.job_id, sub_job_details)

        # Remove the temporary statistics file now that it has been reported.
        remove_file(restore_status_file_path)

    def report_sub_job_success(self):
        """Report this business sub job as completed with its own statistics."""
        str_size = format_capacity(self.restore_size)
        # Fixed typo in log message ("resotre" -> "restore").
        log.info(f"{self.job_id} restore success, restore size: {self.restore_size}")
        log_detail = LogDetail(logInfo=NdmpLabel.PLUGIN_RESTORE_SUBJOB_SUCCESS_LABEL,
                               logInfoParam=[self.sub_id],
                               logLevel=LogLevel.INFO.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100, logDetail=[log_detail],
                                        dataSize=math.ceil(self.restore_size // 1024), speed=self.speed,
                                        taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True)
        report_job_details(self.req_id, self.job_id, sub_job_details)

    def restore_one_copy(self, copy):
        """Start the NDMP restore of one copy and poll it to completion.

        Updates self.task_status/self.error_code; on a negative client return
        the copy is marked failed without polling.
        """
        # NOTE(review): data_path is computed but never used — get_data_path may
        # validate the repository layout as a side effect; confirm before removing.
        data_path = self.param.get_data_path(copy["repositories"], RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        copy_id = self.get_copy_file_id(copy)
        restore_info = self.format_restore_info(copy_id)
        log.info(f"restore_one_copy, copy_id: {copy_id}")
        ret = self.ndmp_client.restore(restore_info)
        if ret < 0:
            log.error(f"{self.sub_id} restore auth failed")
            self.error_code = ret
            self.task_status = SubJobStatusEnum.FAILED.value
            return
        start_time = int(time.time())
        # Remember when the very first copy started, for overall statistics.
        if self.start_time == 0:
            self.start_time = start_time
        while True:
            ndmp_status = self.ndmp_client.get_task_info()
            self.update_task_status(ndmp_status)
            log.info(f"ndmp status: {ndmp_status.status}, process bytes: {ndmp_status.process_bytes}")
            self.report_running_status(start_time, ndmp_status)
            if not self.restore_in_progress(ndmp_status):
                break
            time.sleep(10)

    def report_running_status(self, start_time, ndmp_status):
        """Recompute speed and report progress, throttled to once per 120 seconds."""
        log.info(f"job {self.job_id} is running, restore total size: {self.restore_size + ndmp_status.process_bytes},"
                 f" subjob {self.sub_id} restore size: {ndmp_status.process_bytes}")
        duration = int(time.time()) - start_time
        # Speed in KB/s over this copy's elapsed time; guard against duration 0.
        self.speed = ((self.restore_size + ndmp_status.process_bytes) / duration) // 1024 if duration != 0 else 0
        if (int(time.time()) - self.last_report_time) < 120:
            # Throttled: only push the speed, not a full progress report.
            self.report_speed()
            return
        self.last_report_time = int(time.time())
        self.files_cnt = int(ndmp_status.files_cnt)
        str_capacity = format_capacity(self.restore_size + ndmp_status.process_bytes)
        log_detail = LogDetail(logInfo=NdmpLabel.EXECUTE_RESTORE_LABEL,
                               logInfoParam=[self.sub_id, str_capacity],
                               logLevel=LogLevel.INFO.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=50, logDetail=[log_detail],
                                        taskStatus=SubJobStatusEnum.RUNNING.value, speed=self.speed).dict(by_alias=True)
        report_job_details(self.req_id, self.job_id, sub_job_details)

    def update_task_status(self, ndmp_status):
        """Merge this sub job's progress into the shared restore statistics file."""
        restore_status_file_path = os.path.join(self.cache_path, Constant.RESTORE_STATUS_FILE)
        read_resource_manager = ShareResourceManager(restore_status_file_path, "r")
        json_str = read_resource_manager.read()
        read_resource_manager.close()
        process_size = int(ndmp_status.process_bytes)
        # An empty string means the cache file was just created.
        if not json_str:
            log.info(f"First calculate stats, restore job {self.job_id}, subjob {self.sub_id}")
            task_status = RestoreTaskStats(restoreSize=process_size, startTime=self.start_time).dict(by_alias=True)
        else:
            log.info(f"status file exists, restore job {self.job_id}, subjob {self.sub_id}")
            task_status = json.loads(json_str)
            # process_size is cumulative for this copy, so subtract what was
            # already folded into the shared total.
            total_size = task_status["restoreSize"] + process_size - self.last_process_size
            task_status["restoreSize"] = total_size
        task_status["fileCnt"] = self.files_cnt
        task_status_json_str = json.dumps(task_status)
        update_resource_manager = ShareResourceManager(restore_status_file_path, "w+")
        update_resource_manager.update(task_status_json_str)
        update_resource_manager.close()
        self.last_process_size = process_size  # remember what has been accounted for

    def restore_in_progress(self, ndmp_status):
        """Map the NDMP status to self.task_status; return True while still running.

        COMPLETE is still "in progress" here — the client reports FINISH as the
        terminal success state.
        """
        if ndmp_status.status == NdmpCopyStatus.INIT:
            self.task_status = SubJobStatusEnum.INITIALIZING.value
            return True
        elif (ndmp_status.status == NdmpCopyStatus.PROCESSING or
              ndmp_status.status == NdmpCopyStatus.COMPLETE):
            self.task_status = SubJobStatusEnum.RUNNING.value
            return True
        elif ndmp_status.status == NdmpCopyStatus.ABORT:
            # Fixed typo in log message ("abroted" -> "aborted").
            log.error(f"subjob {self.sub_id} restore was aborted")
            self.task_status = SubJobStatusEnum.ABORTED.value
            return False
        elif ndmp_status.status == NdmpCopyStatus.FINISH:
            log.info(f"subjob {self.sub_id} restore success")
            self.task_status = SubJobStatusEnum.COMPLETED.value
            self.restore_size += ndmp_status.process_bytes
            return False
        else:
            log.error(f"subjob {self.sub_id} restore failed")
            self.task_status = SubJobStatusEnum.FAILED.value
            return False

    def get_copy_file_id(self, copy):
        """Return the copy id, preferring the nested extendInfo.extendInfo layout."""
        extend_info = copy.get("extendInfo", {}).get("extendInfo", {})
        if extend_info:
            return extend_info.get("copy_id", "")
        return copy.get("extendInfo", {}).get("copy_id", "")

    def restore_directory(self, copies):
        """Restore a directory-format copy chain according to the restore level.

        level 0: full (first copy plus, if different, the last).
        level 1: incremental (last copy only).
        level > 1: differential — rebuild the minimal chain of levels.
        """

        def get_diff_copies(all_copies):
            """Pick the differential chain.

            A differential increment builds on a cumulative increment or a
            lower-level differential increment, otherwise on the full copy;
            prefer the lower-level differential base. Example: levels
            0,1,1,2,3 -> keep 0, the *second* 1, 2, 3.
            """
            restore_copies_list = []
            last_level = -1
            for copy_info in all_copies:
                cur_level = int(copy_info.get("extendInfo", {}).get("level", "0"))
                if last_level < cur_level:
                    restore_copies_list.append(copy_info)
                    last_level = cur_level
                elif cur_level == last_level and cur_level == 1:
                    # A repeated level-1 copy supersedes the previous one.
                    restore_copies_list.pop()
                    restore_copies_list.append(copy_info)
            return restore_copies_list

        restore_copies = list()
        # Decide the chain from the level of the last copy in the chain.
        level = self.param.get_restore_level()
        if level == 0:
            log.info("NDMP full restore")
            restore_copies.append(copies[0])
            if copies[0] != copies[-1]:
                restore_copies.append(copies[-1])
            self.restore_inner(restore_copies)
        elif level == 1:
            log.info("NDMP incremental restore")
            restore_copies.append(copies[-1])
            self.restore_inner(restore_copies)
        elif level > 1:
            log.info("NDMP differential restore")
            restore_copies = get_diff_copies(copies)
            self.restore_inner(restore_copies)
        else:
            log.error(f"Restore failed, the last copy is not full, increment or diff, req:{self.req_id}.")

    def restore_snapshot(self, copies):
        """Restore snapshot-format copies (the whole chain, in order)."""
        self.restore_inner(copies)

    def restore(self):
        """Entry point for a restore sub job.

        The "ReportNdmpRestoreInfo" sub job only aggregates statistics; any
        other sub job opens the NDMP IP rules, restores the copies according
        to the copy format, then removes the rules.
        """
        try:
            if self.param.get_sub_job_name() == "ReportNdmpRestoreInfo":
                log.info(f"subJob is ReportNdmpRestoreInfo, job id:{self.job_id}, sub_id: {self.sub_id}.")
                self.report_job_success()
                return

            # NOTE(review): if an exception escapes below, the added IP rules
            # are never removed ("del" is skipped) — confirm whether the
            # post job cleans them up, else wrap in try/finally.
            self.req_opt_ip_rule(self.param.get_src_ip(), "add")
            self.req_opt_ip_rule(self.param.get_dst_ip(), "add")
            log.info(f"Enter NDMP restore, job_id: {self.job_id}, sub_id: {self.sub_id}, "
                     f"copy_format: {self.copy_format}.")
            self.report_restore_start()
            copies = self.param.get_copies()
            if self.copy_format == CopyFormat.DIRECTORY:
                self.restore_directory(copies)
            elif self.copy_format == CopyFormat.SNAPSHOT:
                self.restore_snapshot(copies)
            else:
                log.error(f"Restore failed, the copy format is not directory or snapshot, req:{self.req_id}.")
            self.req_opt_ip_rule(self.param.get_src_ip(), "del")
            self.req_opt_ip_rule(self.param.get_dst_ip(), "del")
        except Exception as e:
            log.error(f"Error occurs in restore: {e}", exc_info=True)
            self.report_job_failed()
            return

    def report_restore_start(self):
        """Report that the restore sub job has started (progress 0, RUNNING)."""
        log_detail = LogDetail(logInfo=NdmpLabel.START_RESTORE_LABEL, logInfoParam=[self.sub_id],
                               logLevel=LogLevel.INFO.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=0, logDetail=[log_detail],
                                        taskStatus=SubJobStatusEnum.RUNNING.value).dict(by_alias=True)
        # Fix: pass job_id as the second argument (was sub_id) to match every
        # other report_job_details call site; sub_id already travels inside
        # sub_job_details.
        report_job_details(self.req_id, self.job_id, sub_job_details)

    def restore_post_job(self):
        """Restore post job (no-op, reports success)."""
        # Fixed log message: previously said "backup_post_job".
        log.info(f"restore_post_job start, job id:{self.job_id}.")
        response = ResultInfo()
        self.report_result(response.dict(by_alias=True))
        return True

    def restore_post_job_progress(self):
        """Report the restore post job as 100% complete."""
        log.info(f"restore_post_job_progress success, job id:{self.job_id}.")
        progress = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                 taskStatus=SubJobStatusEnum.COMPLETED.value)
        self.report_result(progress.dict(by_alias=True))
        return True
