#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import shutil
import json

from common import common
from common.const import CMDResult
from common.logger import Logger
from common.parse_parafile import get_env_variable
from db2.comm.const import Db2Const, Db2JsonConstant
from db2.comm.constant import HadrRoleType, HadrStateType, HadrCfgField, HadrConnectStatus, ParamField
from db2.comm.db2_cmd import check_os_user
from db2.comm.db2_exception import ErrCodeException
from db2.comm.error_code import Db2ErrCode
from db2.comm.util.common_util import Db2CommonUtil
from db2.comm.util.dpf_util import DpfUtil
from db2.restore.hadr.hadr_parse_restore_params import HadrParseRestoreParams

# Module-level logger shared by the whole service; writes to the common db2.log.
LOGGER = Logger().get_logger(filename="db2.log")


class HadrRestoreService:
    """Drive the DB2 HADR restore workflow on the local cluster node.

    The service resolves which node of the target HADR cluster this agent
    runs on, then exposes the shell-level restore steps: stopping/starting
    HADR, re-creating the database, restoring the HADR configuration
    parameters, and managing the bind-mounted log-staging directories under
    the cache repository.

    All db2 commands run as the instance OS user via ``su``; the user name
    is checked for command injection before it is ever interpolated.
    """

    def __init__(self, parse_params_obj: HadrParseRestoreParams):
        # Parsed restore-job parameters (target nodes, job ids, paths).
        self.parse_param = parse_params_obj
        self.os_user = ""
        self.instance_name = ""
        self.db_name = ""
        self.hadr_role = ""
        # Identifier of the local agent, used to find this node in the
        # target cluster description.
        self.agent_id = Db2CommonUtil.get_agent_id()
        self.local_database_node = {}
        self.node_index = 0
        self.init_local_node_param()

    @staticmethod
    def execute_cmd_without_raise_ex(db_cmd):
        """Run *db_cmd* and log the outcome; never raises on failure."""
        LOGGER.info(f"Starting execute command: {db_cmd} without raise exception ...")
        return_code, out, err = common.execute_cmd(db_cmd)
        LOGGER.info(f"Execute command: {db_cmd} complete, return code: {return_code}, out: {out}, err: {err}.")

    @staticmethod
    def execute_cmd_without_echo(db_cmd):
        """Run *db_cmd* (oversize-output variant) discarding its output.

        :raises Exception: when the command's return code is not SUCCESS.
        """
        LOGGER.info(f"Starting execute database cmd: {db_cmd}.")
        return_code, out, err = common.execute_cmd_oversize_return_value(db_cmd)
        if return_code != CMDResult.SUCCESS.value:
            LOGGER.error(f"Execute database command: {db_cmd} failed. return code: {return_code}, "
                         f"out: {out}, err: {err}.")
            raise Exception("Execute database command failed")
        LOGGER.info("Execute database cmd success.")

    @staticmethod
    def execute_cmd_with_echo(db_cmd):
        """Run *db_cmd* and return its stdout.

        :raises Exception: when the command fails or produces no output.
        """
        LOGGER.info(f"Starting execute database cmd: {db_cmd}.")
        return_code, out, err = common.execute_cmd(db_cmd)
        if return_code != CMDResult.SUCCESS.value:
            LOGGER.error(f"Execute database command: {db_cmd} failed. return code: {return_code}, "
                         f"out: {out}, err: {err}.")
            raise Exception("Execute database command failed.")

        if not out:
            LOGGER.error("The execution result is empty.")
            raise Exception("The execution result is empty.")

        LOGGER.info("Execute database cmd success.")
        return out

    @staticmethod
    def execute_check_cmd(check_cmd):
        """Run *check_cmd* and report success as a bool (never raises)."""
        LOGGER.info(f"Starting execute check cmd: {check_cmd}.")
        return_code, out, err = common.execute_cmd(check_cmd)
        if return_code != CMDResult.SUCCESS.value:
            LOGGER.error(f"Execute check cmd: {check_cmd} failed. return code: {return_code}, "
                         f"out: {out}, err: {err}.")
            return False
        return True

    @staticmethod
    def find_in_list(str_list: list, str_find):
        """Return the first element of *str_list* containing *str_find*, else ""."""
        for str_tmp in str_list:
            if str_find in str_tmp:
                return str_tmp
        return ""

    @staticmethod
    def _hadr_cfg_keys():
        """Return the HADR db cfg keys managed by this service.

        Single source of truth shared by restore_db_hadr_cfg, get_hadr_cfg
        and check_hadr_cfg so the three key lists cannot drift apart.
        """
        return (
            HadrCfgField.LOCAL_HOST,
            HadrCfgField.LOCAL_SVC,
            HadrCfgField.REMOTE_HOST,
            HadrCfgField.REMOTE_SVC,
            HadrCfgField.TARGET_LIST,
        )

    def init_local_node_param(self):
        """Locate this agent's node in the target cluster and cache its params.

        Matches the local agent id against the target host nodes; on a match
        fills os_user, instance_name, db_name and hadr_role from the
        corresponding database node.

        :raises Exception: when the os user is empty/unsafe or this agent is
            not part of the target cluster.
        """
        target_host_nodes = self.parse_param.get_target_host_nodes()
        target_database_nodes = self.parse_param.get_target_database_nodes()
        for target_host_node in target_host_nodes:
            host_id = self.parse_param.get_host_id_from_host_node(target_host_node)
            if host_id == self.agent_id:
                self.local_database_node = target_database_nodes[self.node_index]
                os_user_key = Db2JsonConstant.JOB_TARGETENV_NODE_AUTH_AUTHKEY.format(self.node_index,
                                                                                     self.parse_param.pid)
                self.os_user = get_env_variable(os_user_key)
                # Reject empty names or shell metacharacters: the user is
                # interpolated into "su - <user> -c ..." command lines below.
                if common.check_command_injection(self.os_user) or not self.os_user:
                    raise Exception("Param of os user invalid")
                self.instance_name = self.parse_param.get_instance_name_from_database_node(self.local_database_node)
                self.db_name = self.parse_param.get_db_name_from_database_node(self.local_database_node)
                self.hadr_role = self.get_hadr_role()
                break
            self.node_index += 1
        if not self.local_database_node:
            LOGGER.error(f"The node:{self.agent_id} is not in the target cluster.{self.parse_param.get_log_comm()}")
            raise Exception("The node is not in the target cluster.")
        LOGGER.info(f"db_name: {self.db_name}, hadr_role: {self.hadr_role}.")

    def get_hadr_role(self):
        """Return the HADR role (primary/standby) recorded in the job params."""
        LOGGER.info("Obtaining primary/standby information from parameter task information.")
        return self.parse_param.get_hadr_role_from_database_node(self.local_database_node)

    def start_hadr(self):
        """Start HADR on the local database using the role from the job params."""
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        start_cmd = f'su - {self.os_user} -c "db2 start hadr on db {self.db_name} as {self.hadr_role}"'
        self.execute_cmd_without_echo(start_cmd)
        LOGGER.info("Succeeded starting database Hadr.")

    def delete_db_log(self):
        """Remove all files under the database's configured log path."""
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        ori_log_path = Db2CommonUtil.get_log_path_val_of_db(self.os_user, self.db_name)
        del_cmd = f'su - {self.os_user} -c "rm -rf {ori_log_path}*"'
        self.execute_cmd_without_echo(del_cmd)

    def deactivate_db(self):
        """Deactivate the database; failures are logged but not raised."""
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        deactivate_cmd = f'su - {self.os_user} -c "db2 deactivate db {self.db_name}"'
        self.execute_cmd_without_raise_ex(deactivate_cmd)
        LOGGER.info("Succeeded deactivating the database.")

    def create_db_when_restore(self, create_db_cmd):
        """Create the database for restore; "already exists" is tolerated.

        :raises ErrCodeException: when creation fails for any other reason.
        """
        LOGGER.info(f"Start executing create database command: {create_db_cmd} ...")
        code, out, err = common.execute_cmd(create_db_cmd)
        if code != CMDResult.SUCCESS.value and "already exists" not in out:
            LOGGER.error(f"Execute create database command: {create_db_cmd} failed, return code: {code}, "
                         f"out: {out}, err: {err}.")
            raise ErrCodeException(Db2ErrCode.FAILED_EXECUTE_COMMAND, *[create_db_cmd, out])
        LOGGER.info(f"Execute create database command success, agent id: {self.agent_id}.")

    def stop_hadr(self):
        """Stop HADR and re-create the database as an empty restore target.

        Sequence: deactivate -> disconnect applications -> stop hadr ->
        drop db -> create db. Individual stop/drop failures are tolerated;
        only the final create is checked.
        """
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        stop_cmd = f'su - {self.os_user} -c "db2 stop hadr on db {self.db_name}"'
        self.deactivate_db()
        Db2CommonUtil.disconnect_db_application(self.os_user, self.db_name)
        self.execute_cmd_without_raise_ex(stop_cmd)
        drop_cmd = f'su - {self.os_user} -c "db2 drop db {self.db_name}"'
        self.execute_cmd_without_raise_ex(drop_cmd)
        create_cmd = f'su - {self.os_user} -c "db2 create db {self.db_name}"'
        self.create_db_when_restore(create_cmd)
        LOGGER.info("Succeeded stop hadr.")

    def restore_db_hadr_cfg(self):
        """Write the HADR cfg values from the job parameters into the db cfg.

        :raises Exception: when the node's extend info is missing or any
            "db2 update db cfg" command fails.
        """
        extend_info = self.local_database_node.get(ParamField.EXTEND_INFO, {})
        if not extend_info:
            raise Exception("The extended information about the database node is empty.")

        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        # NOTE: iterate the shared key tuple; the previous implementation
        # iterated a *set* literal misnamed "dict", so the update order was
        # nondeterministic.
        for cfg_key in self._hadr_cfg_keys():
            cfg_value = extend_info.get(cfg_key, "")
            if not cfg_value:
                LOGGER.info(f"cfg_key: {cfg_key}, cfg_value: {cfg_value}")
                continue
            update_cfg_cmd = f'su - {self.os_user} -c "db2 update db cfg for {self.db_name} ' \
                             f'using {cfg_key} \'{cfg_value}\'"'
            self.execute_cmd_without_echo(update_cfg_cmd)
        LOGGER.info("Succeeded restoring database configure")

    def get_cluster_node_type(self):
        """Query db2pd for the live HADR role of the local node.

        :raises Exception: when the role line cannot be found in the output.
        """
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        hadr_role_cmd = f'su - {self.os_user} -c "db2pd -db {self.db_name} -hadr"'
        out = self.execute_cmd_with_echo(hadr_role_cmd)
        out = self.find_in_list(out.split("\n"), HadrCfgField.ROLE)
        if not out:
            raise Exception("Failed to query the hadr role status.")
        # The role value is the last whitespace-separated token on the line.
        hadr_role = out.split()[-1]
        LOGGER.info(f"Hadr role is {hadr_role}.")
        return hadr_role

    def check_database_is_hadr(self):
        """Return True when db2pd reports HADR is enabled for the database."""
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        hadr_cmd = f'su - {self.os_user} -c "db2pd -db {self.db_name} -hadr"'
        if not self.execute_check_cmd(hadr_cmd):
            LOGGER.error("Hadr is not enabled for the database.")
            return False
        return True

    def check_cluster_connectivity(self):
        """Check that the local node is connected and in a healthy HADR state.

        Healthy means CONNECTED plus either PEER state, or REMOTE_CATCHUP
        state on a standby node.
        """
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        hadr_status_cmd = f'su - {self.os_user} -c "db2pd -db {self.db_name} -hadr"'
        out = self.execute_cmd_with_echo(hadr_status_cmd)
        out_lines = out.split("\n")
        cfg_param = self.find_in_list(out_lines, HadrCfgField.CONNECT_STATUS)
        if not cfg_param:
            LOGGER.error("Failed to query the hadr connection status.")
            return False
        hadr_connect_status = cfg_param.split()[-1]
        LOGGER.info(f"The Hadr connection status of the current node is {hadr_connect_status}.")
        if hadr_connect_status != HadrConnectStatus.CONNECTED:
            LOGGER.error(f"Hadr is not connected.{self.parse_param.get_log_comm()}")
            return False

        cfg_param = self.find_in_list(out_lines, HadrCfgField.STATE)
        if not cfg_param:
            LOGGER.error("Failed to query the hadr connection status.")
            return False
        hadr_state = cfg_param.split()[-1]
        node_type = self.get_cluster_node_type()
        LOGGER.info(f"The current node status, hadr_state= {hadr_state}, node_type: {node_type}.")
        if hadr_state == HadrStateType.PEER or \
                (hadr_state == HadrStateType.REMOTE_CATCHUP and node_type == HadrRoleType.STANDBY):
            return True
        LOGGER.error(f"The Hadr node is abnormal.{self.parse_param.get_log_comm()}")
        return False

    def check_node_is_primary(self):
        """Return True when the job parameters mark this node as primary."""
        return self.hadr_role == HadrRoleType.PRIMARY

    def check_os_user_exist(self):
        """Return whether the configured OS user exists on this host."""
        return check_os_user(self.os_user)

    def get_hadr_cfg(self):
        """Read the HADR-related values from the live database configuration.

        :return: (True, {cfg_key: value}) on success, (False, {}) on failure.
            Host values are normalized from domain names to IPs; an unset
            parameter yields "".
        """
        Db2CommonUtil.check_os_user_with_ex(self.os_user)
        cfg_cmd = f'su - {self.os_user} -c "db2 get db cfg for {self.db_name}"'
        ret, database_cfg, _ = common.execute_cmd_oversize_return_value(cfg_cmd)
        if ret != CMDResult.SUCCESS.value:
            LOGGER.error(f"Failed to query the configuration.{self.parse_param.get_log_comm()}")
            return False, {}
        cfg_keys = self._hadr_cfg_keys()
        db_dict = dict()
        for cfg_line in database_cfg.split('\n'):
            # Config lines look like: "HADR local host name (HADR_LOCAL_HOST) = host1".
            # Guard on "(" too: a ") =" line without "(" would raise IndexError.
            if ") =" not in cfg_line or "(" not in cfg_line:
                continue
            tmp_message = cfg_line.split('(')[1]
            tmp_key = tmp_message.split(')')[0]
            if tmp_key not in cfg_keys:
                continue
            tmp_name = tmp_message.split()[-1]
            if tmp_name == "=":
                # "...) =" with nothing after "=" means the parameter is unset.
                tmp_name = ""
            if tmp_key in (HadrCfgField.LOCAL_HOST, HadrCfgField.REMOTE_HOST):
                tmp_name = Db2CommonUtil.domain_conversion_ip(tmp_name)
            db_dict[tmp_key] = tmp_name
        return True, db_dict

    def check_hadr_cfg(self):
        """Verify the live HADR cfg still matches the job's target values."""
        ret, hadr_cfg_dict = self.get_hadr_cfg()
        if not ret or not hadr_cfg_dict:
            LOGGER.error(f"Failed to query the configuration, ret: {ret}.{self.parse_param.get_log_comm()}")
            return False
        for cfg_key in self._hadr_cfg_keys():
            node_cfg_value = hadr_cfg_dict.get(cfg_key, "")
            target_cfg_value = self.local_database_node.get(ParamField.EXTEND_INFO, {}).get(cfg_key, "")
            if node_cfg_value != target_cfg_value:
                LOGGER.error(f"Hadr cfg{cfg_key} has been changed.")
                return False
        LOGGER.info("The hadr cfg is verified successfully.")
        return True

    def get_logtarget_path(self):
        """Return the per-subjob logtarget staging path in the cache repo."""
        return os.path.join(self.parse_param.cache_path, f"{Db2Const.LOG_TARGET}_{self.parse_param.sub_job_id}")

    def get_logtarget_bind_path(self):
        """Return the stable bind-mount destination for the logtarget path."""
        return os.path.join(Db2Const.CACHE_MOUNT_PATH, self.parse_param.job_id, Db2Const.LOG_TARGET)

    def create_logtarget_path(self):
        """Create an empty logtarget staging directory owned by the OS user."""
        DpfUtil.create_empty_dir(self.os_user, self.get_logtarget_path())

    def create_logtarget_bind_path(self):
        """Create cache-mount dir, job dir and logtarget bind dir in order.

        :return: True when all three levels exist/were created, else False.
        """
        LOGGER.info("Start create logtarget bind path.")
        if not self.create_dir(Db2Const.CACHE_MOUNT_PATH):
            LOGGER.error(f"Failed to create the cache directory.{self.parse_param.get_log_comm()}")
            return False
        job_path = os.path.join(Db2Const.CACHE_MOUNT_PATH, self.parse_param.job_id)
        if not self.create_dir(job_path):
            LOGGER.error(
                f"Failed to create the directory: {job_path}.{self.parse_param.get_log_comm()}")
            return False
        bind_logtarget_path = os.path.join(Db2Const.CACHE_MOUNT_PATH, self.parse_param.job_id, Db2Const.LOG_TARGET)
        if not self.create_dir(bind_logtarget_path):
            LOGGER.error(
                f"Failed to create the directory: {bind_logtarget_path}.{self.parse_param.get_log_comm()}")
            return False
        LOGGER.info("Succeeded created the binding of the logtarget.")
        return True

    def mount_bind_logtarget(self):
        """Create both logtarget paths and bind-mount the staging dir onto the bind dir."""
        LOGGER.info("Start to bind the log directory.")
        self.create_logtarget_path()
        if not self.create_logtarget_bind_path():
            LOGGER.error(f"Failed to create the log bind directory.{self.parse_param.get_log_comm()}")
            return False
        return self.mount_bind(self.get_logtarget_path(), self.get_logtarget_bind_path())

    def mount_bind(self, src_path, dst_path):
        """Bind-mount *src_path* onto *dst_path* after validating both paths.

        :return: True on success, False on any validation or mount failure.
        """
        if not os.path.exists(src_path):
            LOGGER.error(f"The src path: {src_path} does not exist.{self.parse_param.get_log_comm()}")
            return False
        if not os.path.exists(dst_path):
            LOGGER.error(f"The dst path: {dst_path} does not exist.{self.parse_param.get_log_comm()}")
            return False
        if not Db2CommonUtil.verify_path_trustlist(src_path):
            LOGGER.error(f"Invalid src path: {src_path}.{self.parse_param.get_log_comm()}")
            return False
        # Bug fix: the original re-validated src_path here and logged it as
        # the dst path, so dst_path was never checked against the trustlist.
        if not Db2CommonUtil.verify_path_trustlist(dst_path):
            LOGGER.error(f"Invalid dst path: {dst_path}.{self.parse_param.get_log_comm()}")
            return False
        mount_cmd = f"mount --bind {src_path} {dst_path}"
        return self.execute_check_cmd(mount_cmd)

    def umount_bind(self, bind_path):
        """Unmount *bind_path* if it is currently listed in the mount table.

        :return: True when the path is not mounted or was unmounted; False on
            validation failure, a missing path, or a failed umount.
        """
        if not os.path.exists(bind_path):
            return False
        if not Db2CommonUtil.verify_path_trustlist(bind_path):
            LOGGER.error(f"Invalid path: {bind_path}.{self.parse_param.get_log_comm()}")
            return False
        # Bug fix: run "mount" once. The original executed it twice and its
        # first call raised on failure, making the "Mount not exist" branch
        # unreachable.
        return_code, out_info, err = common.execute_cmd("mount")
        if return_code != CMDResult.SUCCESS.value:
            LOGGER.info(f"Mount not exist.{self.parse_param.get_log_comm()}")
            return True
        umount_cmd = f"umount {bind_path}"
        for mount_line in out_info.split('\n'):
            # Token match avoids substring hits on longer paths.
            if bind_path in mount_line.split(' '):
                return self.execute_check_cmd(umount_cmd)
        return True

    def del_dir(self, dir_path):
        """Delete a trusted path: directories recursively, files directly."""
        if not Db2CommonUtil.verify_path_trustlist(dir_path):
            LOGGER.error(f"Invalid path: {dir_path}.{self.parse_param.get_log_comm()}")
            return
        if os.path.exists(dir_path):
            if os.path.isdir(dir_path):
                shutil.rmtree(dir_path)
            else:
                os.remove(dir_path)

    def umount_cache_bind(self):
        """Unmount the logtarget bind and remove the job's cache directories."""
        LOGGER.info("Start umount the cache repository.")
        bind_cache_path = os.path.join(Db2Const.CACHE_MOUNT_PATH, self.parse_param.job_id)
        if not os.path.exists(bind_cache_path):
            return
        logtarget_bind_path = self.get_logtarget_bind_path()
        self.umount_bind(logtarget_bind_path)

        self.del_dir(self.get_logtarget_path())
        self.del_dir(bind_cache_path)

    def clean_resource(self):
        """Release all restore-time resources held by this service."""
        LOGGER.info("Start clean resource.")
        self.umount_cache_bind()
        LOGGER.info("Resources are cleared successfully.")

    def create_dir(self, path):
        """Create *path* (mode 0755) if absent; return False on untrusted path."""
        if os.path.exists(path):
            return True
        if not Db2CommonUtil.verify_path_trustlist(path):
            LOGGER.error(f"Invalid path: {path}.{self.parse_param.get_log_comm()}")
            return False
        os.mkdir(path)
        os.chmod(path, 0o755)
        return True
