# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.


import argparse
import textwrap
from collections import OrderedDict

from openmind.utils import logging
from openmind.cli.eval.lmeval import LMEval
from openmind.cli.subcommand import SubCommand

# Module-level logger for this subcommand; `allow_line_separator=True` lets
# log messages keep embedded newlines instead of collapsing them.
logger = logging.get_logger(name=__name__, allow_line_separator=True)
logging.set_verbosity_info()

# Identifier of the lm-evaluation-harness backend.
LMEVAL = "lmeval"
# All backends the `eval` subcommand accepts; `--backend` is validated against this.
SUPPORT_BACKEND = (LMEVAL,)

# Maps a supported lm-evaluation-harness task name to a one-entry dict that
# translates the dataset repo id on the Hugging Face Hub to the mirrored repo
# id on the openMind Hub.
TASK_DATASET_MAPPING = OrderedDict(
    [
        # task, {dataset repo id on hf: dataset repo id on om}
        # You can find the repo id of the corresponding dataset in the `dataset_path` field of
        # lm-evaluation-harness/lm_eval/tasks/<specific task>/<task_name.yaml>.
        # For example, the repo id of the arithmetic dataset can be found here:
        # https://github.com/EleutherAI/lm-evaluation-harness/blob/2a6acc88a0c31be7734aec85b17555323b70c049/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml#L4
        ("arithmetic", {"EleutherAI/arithmetic": "Datasets2024/arithmetic"}),
        ("gsm8k", {"gsm8k": "ILoveDataset/gsm8k"}),
        ("mmlu", {"hails/mmlu_no_train": "ILoveDataset/mmlu"}),
        ("mgsm_cot_native", {"juletxara/mgsm": "ILoveDataset/mgsm"}),
        ("mgsm_direct", {"juletxara/mgsm": "ILoveDataset/mgsm"}),
        ("truthfulqa", {"truthful_qa": "ILoveDataset/truthful_qa"}),
        ("hellaswag", {"hellaswag": "ILoveDataset/hellaswag"}),
        ("ai2_arc", {"allenai/ai2_arc": "ILoveDataset/ai2_arc"}),
    ]
)


class Eval(SubCommand):
    """Holds all the logic for the `openmind-cli eval` subcommand."""

    def __init__(self, subparsers: argparse._SubParsersAction):
        """Register the `eval` subparser, its arguments, and its handler.

        Args:
            subparsers: The subparsers action of the top-level CLI parser to
                which the `eval` command is attached.
        """
        super().__init__()
        self._parser = subparsers.add_parser(
            "eval",
            prog="openmind-cli eval",
            help="EleutherAI LLM Evaluation Harness.",
            description="EleutherAI LLM Evaluation Harness.",
            epilog=textwrap.dedent(
                """\
            examples:
                $ openmind-cli eval --backend lmeval --model Baichuan/Baichuan2_7b_chat_pt --device npu:0 --tasks arithmetic --batch_size 8
                ...
            """
            ),
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        # Dispatch to `_eval_cmd` when this subcommand is selected.
        self._parser.set_defaults(func=self._eval_cmd)

    def str_to_bool_for_trust_remote_code(self, value):
        """Parse the value passed to `--trust_remote_code` into a bool.

        Accepts booleans as-is and the case-insensitive strings
        "true"/"1" and "false"/"0".

        Raises:
            argparse.ArgumentTypeError: If the value is not one of the
                accepted spellings. argparse displays this message verbatim
                in its usage error, unlike a plain ValueError which yields a
                generic "invalid ... value" notice.
        """
        if isinstance(value, bool):
            return value
        normalized = value.lower()
        if normalized in ("true", "1"):
            return True
        if normalized in ("false", "0"):
            return False
        raise argparse.ArgumentTypeError(f"Invalid value for --trust_remote_code: {value}.")

    def _add_arguments(self) -> None:
        """Add arguments to the parser."""
        self._parser.add_argument(
            "--backend",
            type=str,
            default="lmeval",
            help="Evaluate backend.",
        )
        self._parser.add_argument(
            "--model",
            type=str,
            default=None,
            help=(
                "The pre-trained or finetuned language model to evaluate. "
                "Can be either a string, the model id of a model hosted inside a model repo on openMind Hub, "
                "or a path to a directory containing pre-trained model."
            ),
        )
        self._parser.add_argument(
            "--tasks",
            type=str,
            default=None,
            metavar="task1,task2",
            # Fixed typo: "lm-eluther" -> "lm-eval harness".
            help="List lm-eval harness tasks to evaluate usage: --tasks task1,task2.",
        )
        # NOTE(review): `type=str` with an int default means args.batch_size is
        # the int 1 when omitted but a str when supplied — presumably the lmeval
        # backend normalizes this; confirm before changing the default to "1".
        self._parser.add_argument(
            "--batch_size",
            type=str,
            default=1,
            metavar="auto|auto:N|N",
            help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
        )
        self._parser.add_argument(
            "--device",
            type=str,
            default="npu:0",
            help="Device to use (e.g. npu:0, cpu).",
        )
        self._parser.add_argument(
            "--limit",
            type=int,
            default=None,
            help="Limit the number of examples per task.",
        )
        # Bug fix: the help string previously had a trailing comma inside the
        # parentheses, turning `help` into a 1-tuple; argparse's help formatter
        # applies %-formatting to `help`, so `openmind-cli eval --help` crashed
        # with a TypeError. The comma now sits outside the parenthesized string.
        self._parser.add_argument(
            "--trust_remote_code",
            type=self.str_to_bool_for_trust_remote_code,
            default=True,
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. "
                "This option should only be set to 1 or true for repositories you trust and in which you have read the code, "
                "as it will execute code present on the Hub on your local machine."
            ),
        )

    def _eval_cmd(self, args: argparse.Namespace) -> None:
        """Evaluate LLM with EleutherAI's llm evaluation harness.

        Args:
            args: Parsed CLI arguments; `args.backend` selects the backend and
                the remaining fields are forwarded to it.
        """
        if args.backend in SUPPORT_BACKEND:
            if args.backend == LMEVAL:
                LMEval().evaluate(args)
        else:
            logger.error(f"Eval interface currently supports {SUPPORT_BACKEND} as backend")