#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import os

from common.common import write_content_to_file, execute_cmd
from common.util.check_utils import is_ip_address
from common.const import SubJobStatusEnum, DBLogLevel, CMDResult
from common.exception.common_exception import ErrCodeException
from common.util.cmd_utils import cmd_format
from common.util.exec_utils import su_exec_cat_cmd
from informix import LOGGER
from informix.comm.comm import set_status, set_permisson, check_instance_close, identify_env_user, GbaseInfo
from informix.comm.common_models import SubJobModel, LogDetail
from informix.comm.const import SubJobPriority, SubJobName, InformixBaseCommand, PERMISSION_755, ErrorCode, \
    InformixInfo, GbaseCommand
from informix.service.restore.restorebase import RestoreBase


class ClusterRestore(RestoreBase):
    def __init__(self, job_manager, param_obj):
        """
        Cluster restore job handler.

        :param job_manager: job manager forwarded to RestoreBase
        :param param_obj: job parameter object providing node/instance info
        """
        super(ClusterRestore, self).__init__(job_manager, param_obj)
        self.peer_instance_name = ''
        self.copy_prim_name = ''
        self.cluster_init_param()
        # Bug fix: identify_env_user() was invoked twice (once for the truth
        # test, once for the value); call it a single time and reuse the result.
        env_user = identify_env_user()
        self.db_user = env_user if env_user else InformixInfo.INFORMIX_USER

    @staticmethod
    def get_secondary_ip(db_user):
        """
        Get the secondary node's IP address from the db user's ~/.rhosts file.

        :param db_user: OS user that owns the database instance
        :return: the secondary node IP, or "" when it cannot be determined
        :raises ErrCodeException: when the matched .rhosts entry is not a valid IP
        """
        code, std_out = su_exec_cat_cmd("~/.rhosts", db_user)
        if code != CMDResult.SUCCESS:
            LOGGER.error(f"Exec query secondary cmd failed, code: {code}, std_out: {std_out}")
            return ""

        out_list = std_out.split('\n')
        for lens in out_list:
            if db_user not in lens:
                continue
            # .rhosts line format: "<host/ip> <user>"; take the first field.
            tmp_ip = lens.split()[0].strip()
            if not is_ip_address(tmp_ip):
                LOGGER.error(f"Unexpected ip:{tmp_ip}")
                # Bug fix: the exception object was created but never raised,
                # so an invalid address fell through and was returned.
                raise ErrCodeException(ErrorCode.ERROR_PARAM.value)
            return tmp_ip
        LOGGER.error("Get secondary node ip fail.")
        return ""

    @staticmethod
    def get_current_os_type():
        """
        Get the current operating system type via ``uname -s``.

        :return: raw stdout of ``uname -s`` (callers match with ``in``),
                 or "" on failure
        """
        query_cmd = "uname -s"
        code, std_out, std_err = execute_cmd(query_cmd)
        if code != CMDResult.SUCCESS:
            # Improvement: std_err was captured but never used; include the
            # command result in the log for easier diagnosis.
            LOGGER.error(f"Query os type fail, code: {code}, std_err: {std_err}")
            return ""
        return std_out

    @staticmethod
    def get_current_instance_name(db_user):
        """
        Get the current instance name from the db user's environment.

        Reads INFORMIXSERVER (informix user) or GBASEDBTSERVER (gbase user)
        from the output of ``su - <user> -c env``.

        :param db_user: OS user that owns the database instance
        :return: the instance name, or "" when it cannot be determined
        """
        server_name = 'INFORMIXSERVER' if db_user == InformixInfo.INFORMIX_USER else 'GBASEDBTSERVER'
        query_cmd = f'su - {db_user} -c "env"'
        _, std_out, _ = execute_cmd(query_cmd)
        for line in std_out.split('\n'):
            if server_name not in line:
                continue
            # Robustness fix: the original 'line.split("=")[1]' raised
            # IndexError when a matching line carried no '=VALUE' part.
            _, sep, value = line.partition('=')
            if sep:
                return value.strip()
        LOGGER.error("Get current instance name fail.")
        return ""

    def cluster_init_param(self):
        """Initialize cluster-specific parameters from the job parameters."""
        self.peer_instance_name = self.param_obj.get_peer_instance_name()
        # Prefer the archive copy instance name when one is set.
        if self.arch_copy_instance_name:
            self.copy_prim_name = self.arch_copy_instance_name
        else:
            self.copy_prim_name = self.copy_instance_name

    def gen_sub_job(self):
        """
        Split the restore into sub jobs (1 pre-job, 2 restore, 3 set
        primary/secondary, 4 start primary) and report the generated list.

        :return: None; the result is published via update_result()
        """
        if self.target_location == 'original':
            sub_jobs = self.gen_sub_job_original()
        else:
            sub_jobs = self.gen_sub_job_new()
        if not sub_jobs:
            LOGGER.error('Failed to generate the restoration subtask.%s', self.get_log_common())
        self.return_result = sub_jobs
        self.update_result()

    def gen_sub_job_original(self):
        """
        Build the sub-job list for a restore to the original location.

        Every node gets a pre-job; the primary node additionally gets the
        restore, set-primary and start sub jobs (the last two ignore failure).

        :return: list of sub-job dicts
        """
        sub_jobs = []
        for node in self.targetenv_nodes:
            node_id = self.param_obj.get_node_id(node)
            status = self.param_obj.get_node_extend_instance_status(node)
            if status != "On-Line (Prim)":
                # Secondary node: only the pre-job is needed here.
                sub_jobs.append(
                    self.build_sub_job(SubJobName.SEC_PRE_JOB, SubJobPriority.JOB_PRIORITY_1.value, node_id))
                continue
            sub_jobs.append(
                self.build_sub_job(SubJobName.PRI_PRE_JOB, SubJobPriority.JOB_PRIORITY_1.value, node_id))
            sub_jobs.append(
                self.build_sub_job(SubJobName.ORIGINAL_RESTORE, SubJobPriority.JOB_PRIORITY_2.value, node_id))
            sub_jobs.append(
                self.build_sub_job(SubJobName.SET_PRIMARY, SubJobPriority.JOB_PRIORITY_3.value, node_id,
                                   ignore_failed=True))
            sub_jobs.append(
                self.build_sub_job(SubJobName.START, SubJobPriority.JOB_PRIORITY_4.value, node_id,
                                   ignore_failed=True))
        return sub_jobs

    def gen_sub_job_new(self):
        """
        Build the sub-job list for a restore to a new location.

        Every node gets a pre-job; the primary node additionally gets the
        restore, set-primary and start sub jobs (the last two ignore failure).

        :return: list of sub-job dicts
        """
        # (job name, priority, ignore_failed) for the primary node, in order.
        primary_plan = (
            (SubJobName.PRI_PRE_JOB, SubJobPriority.JOB_PRIORITY_1.value, False),
            (SubJobName.NEW_RESTORE, SubJobPriority.JOB_PRIORITY_2.value, False),
            (SubJobName.SET_PRIMARY, SubJobPriority.JOB_PRIORITY_3.value, True),
            (SubJobName.START, SubJobPriority.JOB_PRIORITY_4.value, True),
        )
        sub_jobs = []
        for node in self.targetenv_nodes:
            node_id = self.param_obj.get_node_id(node)
            status = self.param_obj.get_node_extend_instance_status(node)
            if status == "On-Line (Prim)":
                for job_name, priority, ignore in primary_plan:
                    sub_jobs.append(self.build_sub_job(job_name, priority, node_id, ignore_failed=ignore))
            else:
                sub_jobs.append(
                    self.build_sub_job(SubJobName.SEC_PRE_JOB, SubJobPriority.JOB_PRIORITY_1.value, node_id))
        return sub_jobs

    def build_sub_job(self, job_name, job_priority, exec_node_id, ignore_failed=False, job_info=None):
        """
        Build a single sub-job dict for the framework.

        :param job_name: sub job name (SubJobName member)
        :param job_priority: execution priority value
        :param exec_node_id: id of the node that executes the sub job
        :param ignore_failed: whether a failure of this sub job is ignored
        :param job_info: optional extra info attached to the sub job
        :return: sub-job dict serialized with field aliases
        """
        # NOTE(review): the literal 4 looks like a fixed sub-job type/policy
        # code expected by SubJobModel -- confirm against its definition.
        model = SubJobModel(self._job_id, job_name, job_priority, 4, ignore_failed, exec_node_id, job_info)
        return model.dict(by_alias=True)

    def restore(self):
        """
        Dispatch the current restore sub job by name and run it.

        Progress is reported through _support_progress; any exception from the
        handler is logged and reported as a failed sub job. After either of the
        restore sub jobs, the copy bind mount is always cleaned up.
        """
        LOGGER.info(f"Start to execute the restoration subtask, sub_job_name: {self._sub_job_name}.")
        self._support_progress(SubJobStatusEnum.RUNNING, 1, 0, DBLogLevel.INFO)
        job_map = {
            SubJobName.SEC_PRE_JOB: self.restore_pre_instance,
            SubJobName.PRI_PRE_JOB: self.restore_pre_instance,
            SubJobName.ORIGINAL_RESTORE: self.restore_base,
            SubJobName.NEW_RESTORE: self.restore_base,
            SubJobName.SET_PRIMARY: self.set_primary,
            SubJobName.SET_SECONDARY: self.set_secondary,
            SubJobName.START: self.start_instance,
        }
        fun = job_map.get(self._sub_job_name)
        if not fun:
            LOGGER.error("Find %s fail.%s", self._sub_job_name, self.get_log_common())
            self._support_progress(SubJobStatusEnum.FAILED, 100, 0, DBLogLevel.INFO)
            return
        try:
            # Fix: the original mixed an f-string prefix with lazy %-style
            # logging args; use plain %-style as elsewhere in this class.
            LOGGER.info("Run -----------------------:%s", fun.__name__)
            fun()
        except Exception as e_obj:
            LOGGER.exception(e_obj)
            self._support_progress(SubJobStatusEnum.FAILED, 100, 0, DBLogLevel.INFO)
        finally:
            # Restore sub jobs presumably bind-mount the copy; always unmount.
            if self._sub_job_name in (SubJobName.ORIGINAL_RESTORE, SubJobName.NEW_RESTORE):
                self.post_copy_umount_bind()

    def restore_pre_instance(self):
        """Run the pre-restore step that matches the current sub job name."""
        pre_handlers = {
            SubJobName.PRI_PRE_JOB: self.restore_pre,
            SubJobName.SEC_PRE_JOB: self.secondary_pre,
        }
        handler = pre_handlers.get(self._sub_job_name)
        if handler is not None:
            handler()
            return
        # Not a known pre job: nothing to prepare, report completion.
        self._support_progress(SubJobStatusEnum.COMPLETED, 100, log_level=DBLogLevel.INFO)

    def secondary_pre(self):
        """
        Pre-restore checks on the standby node.

        The instance must be offline; leftover instance processes are cleaned
        up before the restore proceeds.

        :raises ErrCodeException: when the instance is still running
        """
        LOGGER.info("Start to prepare parameters for the standby node.")
        # 1. The instance must not be online before a restore.
        if not check_instance_close(self._env_dict, self.db_user):
            LOGGER.error("Instance not close. %s", self.get_log_common())
            raise ErrCodeException(ErrorCode.ERR_DB_IS_RUNNING.value)

        # 2. Clean up any leftover instance processes.
        if self.clean_instance_process():
            return
        LOGGER.error("Failed to clear the instance process.")
        self._support_progress(SubJobStatusEnum.FAILED)

    def set_primary(self):
        """
        Set this node as the primary, then run the generated script that
        rebuilds and brings up the secondary node.
        """
        LOGGER.info("Start setting the primary node.")
        if not set_status(self.peer_instance_name, db_user=self.db_user):
            LOGGER.error('Set primary node fail.%s', self.get_log_common())
            self._support_progress(SubJobStatusEnum.FAILED, 100, 0, DBLogLevel.INFO)
            return

        # Generate and execute the secondary-rebuild script.
        self.generate_script()
        script_path = os.path.join(self._cache_area, f"{self._job_id}_{self._instance_name_curr}")
        up_cmd = f"sh {script_path}"
        code, std_out, std_err = execute_cmd(up_cmd)
        if code == CMDResult.SUCCESS:
            LOGGER.info(f"Cmd exec success: {up_cmd}, code : {code}, std_out: {std_out}, std_err: {std_err}")
            self._support_progress(SubJobStatusEnum.COMPLETED, 100, 0, DBLogLevel.INFO)
            return
        LOGGER.error(f"Cmd exec failed: {up_cmd}, code : {code}, std_out: {std_out}, std_err: {std_err}")
        log_detail = LogDetail(log_info="informix_restore_up_sec_failed_label", log_info_param=[self._sub_job_id],
                               log_detail=ErrorCode.EXEC_BACKUP_RECOVER_CMD_FAIL, log_level=DBLogLevel.WARN,
                               log_detail_param=["Up-Sec", std_err])
        self._support_progress(SubJobStatusEnum.FAILED, 100, log_detail=log_detail)

    def set_secondary(self):
        """
        Set this node as the secondary node and report the sub job result.
        """
        LOGGER.info("Start setting the secondary node.")
        succeeded = set_status(self.peer_instance_name, 'secondary', db_user=self.db_user)
        if not succeeded:
            LOGGER.error('Set secondary node fail.%s', self.get_log_common())
        status = SubJobStatusEnum.COMPLETED if succeeded else SubJobStatusEnum.FAILED
        self._support_progress(status, 100, 0, DBLogLevel.INFO)

    def generate_script(self):
        """
        Generate the shell script that rebuilds the secondary node and brings
        it up in secondary mode, then mark the script executable (755).

        The script is written to <cache_area>/<job_id>_<current_instance_name>.
        """
        exec_with_user = GbaseCommand.EXEC_WITH_GBASEDBT \
            if self.db_user == GbaseInfo.GBASE_USER else InformixBaseCommand.EXEC_WITH_INFORMIX
        script_name = os.path.join(self._cache_area, f"{self._job_id}_{self._instance_name_curr}")
        LOGGER.info(f"Start generate rebuild script: {script_name}")
        secondary_ip = self.get_secondary_ip(self.db_user)
        LOGGER.info(f"Get secondary ip address: {secondary_ip}")
        # Bug fix: the shebang was '#!bin/sh' (missing the leading slash).
        content = "#!/bin/sh\n"

        os_type = self.get_current_os_type()
        current_name = self.get_current_instance_name(self.db_user)
        # Build only the command set for the detected OS (the original built
        # both the Linux and AIX variants and discarded one).
        if "AIX" in os_type:
            rebuild_cnt = cmd_format(InformixBaseCommand.REBUILD_SECONDARY_NODE_AIX, secondary_ip)
            # AIX loads the profile with '.'; Linux uses 'source' below.
            up_cmd = f'rsh {secondary_ip} \\". ~/.profile;onmode -d secondary {current_name}\\"'
        else:
            rebuild_cnt = cmd_format(InformixBaseCommand.REBUILD_SECONDARY_NODE_LINUX, secondary_ip)
            up_cmd = f'rsh {secondary_ip} \\"source ~/.bash_profile;onmode -d secondary {current_name}\\"'

        content += cmd_format(exec_with_user, rebuild_cnt)
        content += "\n"
        content += cmd_format(exec_with_user, up_cmd)
        content += "\n"
        # NOTE(review): single quotes are rewritten to double quotes --
        # presumably required by how cmd_format quotes its arguments; confirm
        # before changing.
        content = content.replace('\'', '"')
        LOGGER.info(f"Script content is: {content}")
        write_content_to_file(script_name, content)
        set_permisson(script_name, PERMISSION_755)
