#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import sys

from common import cleaner
from common.common import exter_attack
from common.common_models import ActionResult
from common.const import ExecuteResultEnum, SysData
from common.logger import Logger
from common.parse_parafile import ParamFileUtil
from db2.backup.util.dpf_backup_util import DpfBackupUtil
from db2.comm.const import Db2JobName, Db2CommonConst, Db2Const
from db2.restore.dpf.dpf_ts_restore import DpfTsRestore
from db2.restore.single.single_ts_restore import SingleTsRestore
from db2.restore.ha.ha_ts_restore import HaTsRestore
from db2.restore.hadr.hadr_ts_restore import HadrTsRestore


# Module-level logger; all DB2 plugin components write to the shared "db2.log" file.
LOGGER = Logger().get_logger(filename="db2.log")


@exter_attack
def distribute_task(task_name, pid, job_id, param_dict, sub_job_id=None):
    """Dispatch a DB2 tablespace-restore action to the proper cluster handler.

    Picks a restore implementation based on the cluster type found in
    ``param_dict`` (single instance, DPF, PowerHA/RHEL HA, or HADR; unknown
    types fall back to the single-instance handler), then runs the function
    mapped to ``task_name``.

    :param task_name: job action name (one of the Db2JobName restore actions
        or a progress-report action).
    :param pid: request id used to locate the parameter file and record results.
    :param job_id: main job identifier.
    :param param_dict: parsed parameter dictionary for this job.
    :param sub_job_id: optional sub-job identifier.
    :return: None. Unsupported actions are recorded via
        DpfBackupUtil.record_task_result instead of raising.
    """
    cluster_type = DpfBackupUtil.get_cluster_type(param_dict)
    LOGGER.info(f"cluster_type: {cluster_type}")
    # Unknown cluster types deliberately fall back to the single-instance handler.
    restore_cls = {
        Db2Const.SINGLE: SingleTsRestore,
        Db2Const.DPF_CLUSTER_TYPE: DpfTsRestore,
        Db2Const.POWERHA: HaTsRestore,
        Db2Const.RHELHA: HaTsRestore,
        Db2Const.HADR: HadrTsRestore
    }.get(cluster_type, SingleTsRestore)
    db2_ts_restore_inst = restore_cls(task_name, pid, job_id, sub_job_id, param_dict)
    # Progress queries short-circuit: they only report and never execute a job step.
    if task_name in Db2CommonConst.RESTORE_PROGRESS_ACTIONS:
        db2_ts_restore_inst.report_progress()
        return
    job_name_func_mappings = {
        Db2JobName.ALLOW_RESTORE_IN_LOCAL: {
            "exec_job_func": db2_ts_restore_inst.allow_restore_in_local_node,
            "output_result_func": db2_ts_restore_inst.output_action_result
        },
        Db2JobName.RESTORE_PRE: {
            "exec_job_func": db2_ts_restore_inst.exec_restore_pre_job,
            "output_result_func": db2_ts_restore_inst.output_action_result
        },
        Db2JobName.RESTORE: {
            "exec_job_func": db2_ts_restore_inst.exec_restore,
            "output_result_func": db2_ts_restore_inst.output_action_result
        },
        Db2JobName.RESTORE_POST: {
            "exec_job_func": db2_ts_restore_inst.exec_restore_post_job,
            "output_result_func": db2_ts_restore_inst.output_action_result
        },
        Db2JobName.RESTORE_GEN_SUB: {
            "exec_job_func": db2_ts_restore_inst.exec_restore_gen_sub_job,
            "output_result_func": db2_ts_restore_inst.output_action_result
        }
    }
    job_func_dict = job_name_func_mappings.get(task_name)
    if not job_func_dict:
        LOGGER.error(f"Unsupported job action: {task_name}, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}")
        err_msg = f"Unsupported job action: {task_name}."
        DpfBackupUtil.record_task_result(pid, err_msg, code=ExecuteResultEnum.INTERNAL_ERROR.value)
        return
    # Invoke the execution function mapped to this task.
    action_ret = job_func_dict["exec_job_func"]()
    # Only report back when the job actually produced an ActionResult.
    if job_func_dict.get("output_result_func") and isinstance(action_ret, ActionResult):
        job_func_dict["output_result_func"](action_ret)


@exter_attack
def main():
    """Entry point: cache stdin, validate CLI arguments, and dispatch the task.

    Expected arguments: task_name, pid, job_id, and an optional sub-job id.

    :return: ExecuteResultEnum.SUCCESS.value on success, otherwise
        ExecuteResultEnum.INTERNAL_ERROR.value.
    """
    # Stash the first stdin line (input payload) for use and later cleanup.
    first_line = sys.stdin.readline()
    if first_line:
        SysData.SYS_STDIN = first_line
    argv = sys.argv[1:]
    # Exactly 3 mandatory arguments, plus an optional 4th (sub-job id).
    if len(argv) not in (3, 4):
        LOGGER.error(f"Param number error! Arguments: {argv}")
        return ExecuteResultEnum.INTERNAL_ERROR.value
    task_name, pid, job_id, *optional = argv
    input_sub_job_id = optional[0] if optional else None
    LOGGER.info(f"Start executing task, task name: {task_name}, pid: {pid}, job_id: {job_id}, "
                f"sub_job_id: {input_sub_job_id}.")
    try:
        param_dict = ParamFileUtil.parse_param_file(pid)
    except Exception:
        LOGGER.exception(f"Parse param file failed, task name: {task_name}, pid: {pid}.")
        DpfBackupUtil.record_task_result(pid, "Parse parameter file failed.",
                                         code=ExecuteResultEnum.INTERNAL_ERROR.value)
        return ExecuteResultEnum.INTERNAL_ERROR.value
    try:
        distribute_task(task_name, pid, job_id, param_dict, sub_job_id=input_sub_job_id)
    except Exception:
        LOGGER.exception(f"Distribute DB2 tablespaces restore task failed. Task Name: {task_name}, Pid: {pid}, "
                         f"Job ID: {job_id}, Sub Job ID: {input_sub_job_id}.")
        return ExecuteResultEnum.INTERNAL_ERROR.value
    LOGGER.info(f"Execute task success. Task Name: {task_name}, Pid: {pid}, Job ID: {job_id}, "
                f"Sub Job ID: {input_sub_job_id}.")
    return ExecuteResultEnum.SUCCESS.value


if __name__ == '__main__':
    try:
        # Propagate main()'s result code as the process exit status.
        sys.exit(main())
    finally:
        # Always scrub the cached stdin payload, even if main() raised or
        # sys.exit was called — it may contain sensitive data.
        cleaner.clear(SysData.SYS_STDIN)
        LOGGER.debug('Clear environment variables successfully.')
