#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import stat
import threading
import time
import uuid

from common.common import output_result_file, execute_cmd
from common.common_models import ActionResult
from common.common_models import SubJobModel, LogDetail
from common.const import SubJobPriorityEnum, SubJobStatusEnum, ExecuteResultEnum, SubJobTypeEnum, SubJobPolicyEnum, \
    DBLogLevel, ReportDBLabel, ParamConstant, Encoding
from common.logger import Logger
from common.number_const import NumberConst
from db2.backup.util.db2_backup_util import Db2BackupUtil
from db2.comm import db2_verification
from db2.comm.const import Db2JobName
from db2.comm.constant import HadrRestoreSubjobName
from db2.comm.error_code import Db2ErrCode
from db2.comm.util.common_util import Db2CommonUtil
from db2.comm.util.dpf_util import DpfUtil
from db2.comm.util.job_decorators import job_exception_decorator
from db2.comm.util.param_util import Db2ParamUtil
from db2.restore.db2_restore_base import Db2RestoreBase
from db2.restore.hadr.hadr_parse_restore_params import HadrParseRestoreParams
from db2.comm.constant import HadrRoleType

LOGGER = Logger().get_logger(filename="db2.log")


class HadrRestore(Db2RestoreBase):
    """Base restore flow for a DB2 HADR (high availability disaster recovery) database.

    Orchestrates the restore as four ordered sub-jobs generated for every node:
    check -> stop HADR -> restore -> start HADR.  Several hooks
    (``init_service``, ``check_resource``, ``init_restore_type_dict``,
    ``full_restore``/``incr_restore``/``diff_restore``, ``exec_rollforward``)
    are no-ops in this base class and are presumably overridden by
    role-specific subclasses -- TODO confirm against the concrete subclasses.
    """

    def __init__(self, task_name, pid, job_id, sub_job_id, param_dict):
        """Initialize restore state and parse job parameters.

        :param task_name: name of the task step being executed (see Db2JobName)
        :param pid: request pid used for result/progress reporting
        :param job_id: main job id
        :param sub_job_id: sub-job id (may be empty for main-job steps)
        :param param_dict: raw parameter dictionary from the framework
        """
        super().__init__(task_name, pid, job_id, sub_job_id, param_dict)
        self.parse_param = HadrParseRestoreParams(task_name, pid, job_id, sub_job_id, param_dict)
        # Service object stays None here; init_service() is a no-op in this
        # base class and is expected to populate it in subclasses.
        self.service = None
        self.init_service()
        self.sub_job_array = []
        self.action_result = ActionResult(code=ExecuteResultEnum.SUCCESS)
        # Maps restore type -> restore function; filled by subclasses via
        # init_restore_type_dict().
        self.restore_type_dict = {}
        self.init_restore_type_dict()
        # Maps sub-job name -> handler method (see init_restore_sub_func_dict).
        self.restore_sub_func_dict = {}
        self.init_restore_sub_func_dict()
        self.restore_type = Db2ParamUtil.get_restore_type(self.param_dict)
        self.log_detail = LogDetail(logInfo=ReportDBLabel.RESTORE_SUB_START_COPY,
                                    logInfoParam=[self.sub_job_id], logLevel=DBLogLevel.INFO.value)

    def build_sub_job(self, job_name, job_priority, job_policy, job_info):
        """Build one business sub-job dict (by-alias) for the scheduler.

        NOTE(review): job_policy is passed through as the raw enum while the
        priorities use ``.value`` at the call sites -- confirm SubJobModel
        coerces the enum correctly.
        """
        return SubJobModel(
            jobId=self.job_id, jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value, jobInfo=job_info,
            jobName=job_name, jobPriority=job_priority, policy=job_policy).dict(by_alias=True)

    def build_sub_job_info(self):
        """Return the shared jobInfo payload (JSON string with the primary node's agent id)."""
        job_info = dict()
        job_info["primaryNode"] = self.service.agent_id
        return json.dumps(job_info)

    def init_service(self):
        """Hook: create ``self.service``; no-op here, overridden by subclasses."""
        pass

    def check_resource(self):
        """Hook: extra resource validation for allow_restore_in_local_node; no-op here."""
        pass

    def init_restore_sub_func_dict(self):
        """Register the handler for each named restore sub-job."""
        self.restore_sub_func_dict = {
            HadrRestoreSubjobName.CHECK: self.exec_restore_sub_check,
            HadrRestoreSubjobName.STOP: self.exec_restore_sub_stop,
            HadrRestoreSubjobName.RESTORE: self.exec_restore_sub_restore,
            HadrRestoreSubjobName.START: self.exec_restore_sub_start
        }

    def init_restore_type_dict(self):
        """Hook: fill ``self.restore_type_dict``; no-op here, overridden by subclasses."""
        pass

    @job_exception_decorator(is_restore=True)
    def allow_restore_in_local_node(self):
        """Check whether the restore job is allowed to run on the local node."""
        LOGGER.info(f"Start execute allow restore in local node job.{self.parse_param.get_log_comm()}")
        # Sub-jobs skip this gate; only the main job performs the check.
        if self.parse_param.get_sub_job():
            LOGGER.info("The subtask allow is not supported.")
            return self.exec_job_when_success()

        check_steps = [
            self.check_os_user_exist,
        ]
        for step in check_steps:
            if not step():
                LOGGER.error(f"Failed to execute check cmd.{self.parse_param.get_log_comm()}")
                return self.exec_job_when_failed()

        if not self.check_resource():
            LOGGER.error(f"Failed to execute resource check.{self.parse_param.get_log_comm()}")
            return self.exec_job_when_failed()

        LOGGER.info(f"Execute allow restore in local completed.{self.parse_param.get_log_comm()}")
        return self.exec_job_when_success()

    @job_exception_decorator(write_progress=True, is_restore=True)
    def exec_restore_pre_job(self):
        """Restore pre-job: nothing to prepare, report success."""
        LOGGER.info(f"Start execute restore pre job.{self.parse_param.get_log_comm()}")
        return self.exec_job_when_success()

    @job_exception_decorator(is_restore=True)
    def exec_restore_gen_sub_job(self):
        """Generate the four ordered restore sub-jobs and write them to the result file."""
        job_info = self.build_sub_job_info()
        LOGGER.info(f"Start execute restore database gen sub job.{self.parse_param.get_log_comm()}")

        # Pre-check sub-job, executed once on every node
        sub_job = self.build_sub_job(HadrRestoreSubjobName.CHECK, SubJobPriorityEnum.JOB_PRIORITY_1.value,
                                     SubJobPolicyEnum.EVERY_NODE_ONE_TIME, job_info)
        self.sub_job_array.append(sub_job)

        # Stop-HADR sub-job, executed once on every node
        sub_job = self.build_sub_job(HadrRestoreSubjobName.STOP, SubJobPriorityEnum.JOB_PRIORITY_2.value,
                                     SubJobPolicyEnum.EVERY_NODE_ONE_TIME, job_info)
        self.sub_job_array.append(sub_job)

        # Restore sub-job, executed once on every node
        sub_job = self.build_sub_job(HadrRestoreSubjobName.RESTORE, SubJobPriorityEnum.JOB_PRIORITY_3.value,
                                     SubJobPolicyEnum.EVERY_NODE_ONE_TIME, job_info)
        self.sub_job_array.append(sub_job)

        # Start-HADR sub-job, executed once on every node
        sub_job = self.build_sub_job(HadrRestoreSubjobName.START, SubJobPriorityEnum.JOB_PRIORITY_4.value,
                                     SubJobPolicyEnum.EVERY_NODE_ONE_TIME, job_info)
        LOGGER.info(f"sub_job: {sub_job}")
        self.sub_job_array.append(sub_job)

        output_result_file(self.pid, self.sub_job_array)
        LOGGER.info(f"Execute restore database gen job succ.{self.parse_param.get_log_comm()}")
        return self.exec_job_when_success()

    @job_exception_decorator(write_progress=True, is_restore=True)
    def exec_restore(self):
        """Dispatch the current restore sub-job to its registered handler.

        Reports RUNNING at 5% progress up front, then COMPLETED/FAILED at
        100% depending on the handler's boolean result.
        """
        LOGGER.info(f"Start execute restore.")
        self.sub_job_detail.progress = NumberConst.FIVE
        self.sub_job_detail.task_status = SubJobStatusEnum.RUNNING.value
        self.write_progress_to_file()
        sub_job_name = self.parse_param.get_sub_job_name()
        sub_job_func = self.restore_sub_func_dict.get(sub_job_name, None)
        if not sub_job_func:
            LOGGER.error(f"Unknown sub({sub_job_name}).{self.parse_param.get_log_comm()}")
            self.action_result.message = f"Unknown sub job: {sub_job_name}."
            return self.exec_job_when_failed()
        ret = sub_job_func()
        if not ret:
            LOGGER.error(f"Exec sub job({sub_job_name}) failed.{self.parse_param.get_log_comm()}")
            self.sub_job_detail.progress = NumberConst.HUNDRED
            self.sub_job_detail.task_status = SubJobStatusEnum.FAILED.value
            self.log_detail.log_info = ReportDBLabel.SUB_JOB_FALIED
            self.log_detail.log_level = DBLogLevel.ERROR.value
            self.sub_job_detail.log_detail = [self.log_detail]
            self.write_progress_to_file()
            return self.exec_job_when_failed()
        LOGGER.info(f"Execute restore sub({sub_job_name}) successfully.")
        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        self.write_progress_to_file()
        return self.exec_job_when_success()

    def exec_restore_sub_check(self):
        """Sub-job CHECK: run pre-restore validations; return True on success."""
        LOGGER.info("Start execute restore sub(check).")
        check_steps = [
            self.check_os_user_exist,
        ]
        for step in check_steps:
            if not step():
                LOGGER.error(f"Failed to execute sub check cmd.{self.parse_param.get_log_comm()}")
                return False
        LOGGER.info(f"Execute restore sub (check) completed.{self.parse_param.get_log_comm()}")
        return True

    def exec_restore_sub_stop(self):
        """Sub-job STOP: stop HADR on this node; return True on success."""
        LOGGER.info("Start execute restore sub(stop).")
        try:
            self.service.stop_hadr()
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)
            return False
        LOGGER.info("Execute execute restore sub job(stop) completed.")
        return True

    def exec_restore_sub_restore(self):
        """Sub-job RESTORE: restore the database, clear archive logs, rollforward on primary.

        Returns True on success.  Spawns a daemon thread that keeps reporting
        progress while the long-running restore executes.
        """
        LOGGER.info("Start execute sub(restore) task.")

        self.sub_job_detail.log_detail = [self.log_detail]
        self.write_progress_to_file()
        self.sub_job_detail.log_detail = []

        progress_thread = threading.Thread(name="progress", target=self.upload_report_progress)
        # Daemon thread so the reporter dies together with the task if the
        # task ends unexpectedly.
        progress_thread.daemon = True
        progress_thread.start()
        # 1. Resolve the restore function from the restore type
        restore_func = self.restore_type_dict.get(self.restore_type)
        if not restore_func:
            LOGGER.error(f"Unknown restore type: {self.restore_type}.{self.parse_param.get_log_comm()}")
            return False
        if not self.service.mount_bind_logtarget():
            LOGGER.error(f"Failed to bind the logtarget.{self.parse_param.get_log_comm()}")
            return False

        # 2. Execute the restore operation
        if not restore_func():
            LOGGER.error(f"Failed to execute the restore sub job.{self.parse_param.get_log_comm()}")
            return False

        # After restore and before rollforward, clear the archive-log
        # directory (executed on both primary and standby nodes).
        log_arch_meth_val = Db2CommonUtil.get_log_arch_meth_val_of_db(self.service.os_user, self.service.db_name)
        if Db2CommonUtil.check_log_arch_meth_is_dir(log_arch_meth_val):
            arch_log_path = os.path.realpath(
                os.path.join(log_arch_meth_val, self.service.instance_name, self.service.db_name))
            Db2CommonUtil.remove_file_if_exists(arch_log_path)
            Db2CommonUtil.clear_dir_if_exists(arch_log_path)
        # 3. Rollforward (primary node only)
        if self.service.check_node_is_primary():
            LOGGER.info("The current node is primary node, need to execute rollforward.")
            if not self.exec_rollforward():
                LOGGER.error(f"Failed to roll back the database.{self.parse_param.get_log_comm()}")
                return False
        else:
            LOGGER.info("The standby node does not need to execute rollforward.")
        self.sub_job_detail.progress = NumberConst.HUNDRED
        self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
        self.log_detail.log_info = ReportDBLabel.SUB_JOB_SUCCESS
        self.sub_job_detail.log_detail = [self.log_detail]
        self.write_progress_to_file()
        LOGGER.info("Execute execute sub(restore) completed.")
        return True

    def exec_restore_sub_start(self):
        """Sub-job START: restore HADR configuration and start HADR; return True on success."""
        LOGGER.info("Start execute restore sub(start).")

        # 1. Restore the HADR configuration
        try:
            self.service.restore_db_hadr_cfg()
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)
            return False
        # 2. Start HADR
        try:
            self.service.start_hadr()
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)
            return False
        LOGGER.info("Execute execute restore sub job(start) completed.")
        return True

    def full_restore(self):
        """Hook: full restore implementation; no-op here, overridden by subclasses."""
        pass

    def incr_restore(self):
        """Hook: incremental restore implementation; no-op here, overridden by subclasses."""
        pass

    def diff_restore(self):
        """Hook: differential restore implementation; no-op here, overridden by subclasses."""
        pass

    @job_exception_decorator(write_progress=True, is_restore=True)
    def exec_restore_post_job(self):
        """Restore post-job: report progress and clean up temporary resources."""
        LOGGER.info("Start execute restore post.")
        self.sub_job_detail.progress = NumberConst.FIVE
        self.sub_job_detail.task_status = SubJobStatusEnum.RUNNING.value
        self.write_progress_to_file()
        self.clean_resource()
        LOGGER.info("Execute restore post successfully.")
        return self.exec_job_when_success()

    def report_progress(self):
        """Report progress using the sub-job id when present, otherwise the main job id."""
        if self.sub_job_id:
            Db2BackupUtil.report_progress_utl(self.pid, self.sub_job_id, self.parse_param.cache_path)
        else:
            Db2BackupUtil.report_progress_utl(self.pid, self.job_id, self.parse_param.cache_path)

    def output_action_result(self, action_ret: ActionResult):
        """Write the task result to the result file, reporting 100% progress first where needed."""
        # The gen-sub-job task reports inside its own execution function.
        if self.task_name == Db2JobName.RESTORE_GEN_SUB:
            return

        # Tasks that must report progress before emitting the result.
        report_progress_task = [
            Db2JobName.RESTORE_PRE,
            Db2JobName.RESTORE,
            Db2JobName.RESTORE_POST
        ]

        # Report final task progress.
        if self.task_name in report_progress_task:
            self.sub_job_detail.progress = NumberConst.HUNDRED
            if action_ret.code == ExecuteResultEnum.SUCCESS.value:
                self.sub_job_detail.task_status = SubJobStatusEnum.COMPLETED.value
            else:
                self.sub_job_detail.task_status = SubJobStatusEnum.FAILED.value
            self.write_progress_to_file()

        super(HadrRestore, self).output_action_result(action_ret)

    def check_os_user_exist(self):
        """Check the DB2 OS user exists; fill action_result and return False if not."""
        if not self.service.check_os_user_exist():
            LOGGER.error(f"The os user does not exists.{self.parse_param.get_log_comm()}")
            self.action_result.body_err = Db2ErrCode.OS_USER_NOT_EXISTS
            self.action_result.message = "The os user does not exists"
            return False
        LOGGER.info("Checking the os user succeeded.")
        return True

    def check_database_exist(self):
        """Check the target database exists; fill action_result and return False if not."""
        if not db2_verification.check_database_exists(self.service.os_user, self.service.db_name):
            LOGGER.error(f"The database does not exist.{self.parse_param.get_log_comm()}")
            self.action_result.message = "The database does not exist."
            self.action_result.body_err_params = [self.service.db_name]
            self.action_result.body_err = Db2ErrCode.DATABASE_NOT_EXISTS
            return False
        LOGGER.info("Checking the cluster database exist succeeded.")
        return True

    def check_database_is_hadr(self):
        """Check HADR is enabled for the database; fill action_result and return False if not."""
        if not self.service.check_database_is_hadr():
            LOGGER.error(f"Hadr is not enabled for the database.{self.parse_param.get_log_comm()}")
            self.action_result.message = "Hadr is not enabled for the database."
            self.action_result.body_err = Db2ErrCode.ERR_DATABASE_STATUS
            return False
        LOGGER.info("Checking the cluster database hadr succeeded.")
        return True

    def check_cluster_connectivity(self):
        """Check cluster connectivity and that the DB2 process is running; return False on failure."""
        if not self.service.check_cluster_connectivity():
            LOGGER.error(f"Failed to check cluster connectivity.{self.parse_param.get_log_comm()}")
            self.action_result.message = "Failed to check cluster connectivity."
            self.action_result.body_err = Db2ErrCode.DB_SERVICE_ERROR
            return False
        process_service = Db2CommonUtil.check_process_service(self.service.os_user)
        if not process_service:
            LOGGER.error(f"Process is not start.{self.parse_param.get_log_comm()}")
            self.action_result.message = ""
            self.action_result.body_err = Db2ErrCode.DB_SERVICE_ERROR
            return False
        LOGGER.info("Checking the cluster connectivity succeeded.")
        return True

    def check_database_pending(self):
        """On the primary node, check the database has no pending tasks; standby always passes."""
        if not self.service.check_node_is_primary():
            LOGGER.info("The standby node does not need to be checked.")
            return True
        pending_ret, pending_info_str = DpfUtil.check_database_pending_status_for_restore(self.service.os_user,
                                                                                          self.service.db_name)
        if pending_ret:
            LOGGER.error(f"There are pending tasks in the db2 database.{self.parse_param.get_log_comm()}")
            self.action_result.body_err = Db2ErrCode.DATABASE_EXISTS_PENDING
            self.action_result.body_err_params = [self.service.db_name]
            self.action_result.message = "There are pending tasks in the db2 single database"
            return False
        return True

    def check_hadr_cfg(self):
        """Verify the HADR configuration; fill action_result and return False on failure."""
        if not self.service.check_hadr_cfg():
            LOGGER.error(f"Failed to verify the hadr cfg.{self.parse_param.get_log_comm()}")
            self.action_result.message = "Failed to verify the hadr cfg"
            self.action_result.body_err = Db2ErrCode.ERROR_COMMON_INVALID_PARAMETER
            return False
        return True

    def check_node_is_primary(self):
        """Return True if this node's HADR role is PRIMARY; False on lookup failure."""
        try:
            hadr_role = self.service.get_cluster_node_type()
        except Exception as exception_str:
            LOGGER.error(f"Failed to obtain the node information.{self.parse_param.get_log_comm()}")
            self.action_result.message = "Failed to get the hadr cfg"
            self.action_result.body_err = Db2ErrCode.DB_SERVICE_ERROR
            return False
        return hadr_role == HadrRoleType.PRIMARY

    def report_job_details(self, job_detail):
        """Report job details to the agent framework via rpctool.sh.

        Writes *job_detail* (serialized by alias) to a uniquely named input
        file, invokes ``rpctool.sh ReportJobDetails``, logs the output file's
        content, and removes both temp files.

        :param job_detail: job detail model to report (must support ``.dict(by_alias=True)``)
        """
        input_file = os.path.join(ParamConstant.RESULT_PATH, str(uuid.uuid4()) + '_input')
        output_file = os.path.join(ParamConstant.RESULT_PATH, str(uuid.uuid4()) + '_output')
        json_str = json.dumps(job_detail.dict(by_alias=True))
        # O_EXCL guards against clobbering an existing file; owner-only perms.
        flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        modes = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR
        with os.fdopen(os.open(input_file, flags, modes), 'w') as jsonfile:
            jsonfile.write(json_str)
        cmd = f'sh {ParamConstant.BIN_PATH}/rpctool.sh ReportJobDetails' \
              f' {input_file} {output_file}'
        code, out, err = execute_cmd(cmd)
        LOGGER.info(f'report_job_details, code: {code}, out: {out}, err: {err}')
        with open(output_file, 'r', encoding=Encoding.INTERNAL_ENCODING) as file_read:
            LOGGER.info(f'{self.pid} report result: {file_read.readlines()}')
        os.remove(input_file)
        os.remove(output_file)

    def write_progress_to_file(self):
        """Proactively report the current sub-job detail, then clear its log details."""
        LOGGER.info(f"Start write progress to file.")
        # Proactive (push) progress report.
        Db2CommonUtil.proactively_report_progress(pid=self.pid, job_detail=self.sub_job_detail)
        self.sub_job_detail.log_detail = []

    def exec_rollforward(self):
        """Hook: rollforward after restore; no-op here, overridden by subclasses."""
        pass

    def exec_job_when_execpt(self):
        """Mark the action result as failed after an exception and return it.

        NOTE(review): name is misspelled ("execpt"), presumably referenced by
        the job exception decorator -- do not rename without checking callers.
        """
        if self.log_detail.log_info == ReportDBLabel.RESTORE_SUB_START_COPY:
            self.log_detail.log_info = ReportDBLabel.SUB_JOB_FALIED
        self.log_detail.log_level = DBLogLevel.ERROR.value
        self.action_result.code = ExecuteResultEnum.INTERNAL_ERROR.value
        return self.action_result

    def exec_job_when_success(self):
        """Mark the action result and log detail as successful and return the result."""
        self.action_result.code = ExecuteResultEnum.SUCCESS.value
        self.log_detail.log_info = ReportDBLabel.SUB_JOB_SUCCESS
        self.log_detail.log_level = DBLogLevel.INFO.value
        return self.action_result

    def exec_job_when_failed(self):
        """Mark the action result as failed and return it."""
        if self.log_detail.log_info == ReportDBLabel.RESTORE_SUB_START_COPY:
            self.log_detail.log_info = ReportDBLabel.SUB_JOB_FALIED
        self.log_detail.log_level = DBLogLevel.ERROR.value
        self.action_result.code = ExecuteResultEnum.INTERNAL_ERROR.value
        return self.action_result

    def clean_resource(self):
        """Best-effort cleanup of service resources; exceptions are logged, not raised."""
        try:
            self.service.clean_resource()
        except Exception as exception_str:
            LOGGER.exception(exception_str, exc_info=True)

    def upload_report_progress(self):
        """Progress-reporter loop (runs in a daemon thread).

        Reports the sub-job detail every THIRTY seconds until the task status
        leaves RUNNING, then exits.
        """
        while True:
            if self.sub_job_detail.task_status != SubJobStatusEnum.RUNNING.value:
                LOGGER.debug(f"Stop report progress thread. current job detail : {self.sub_job_detail.dict()}")
                break
            LOGGER.debug(f"Report progress detail: {self.sub_job_detail.dict()}")
            Db2CommonUtil.proactively_report_progress(pid=self.pid, job_detail=self.sub_job_detail)
            self.sub_job_detail.log_detail = []
            time.sleep(NumberConst.THIRTY)
