# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

import argparse
import importlib
import importlib.metadata
import importlib.util
import os
import sys

import yaml

from openmind.utils.constants import Stages, FinetuneType, Frameworks
from openmind.utils.import_utils import is_swanlab_available
from openmind.utils.arguments_utils import str2bool
from openmind.utils import logging, is_transformers_available, is_torch_available
from openmind.flow.legacy_arguments import _add_legacy_args, _migrate_legacy_args

if is_torch_available():
    import transformers
    from transformers import HfArgumentParser, Seq2SeqTrainingArguments
    from transformers.trainer_utils import get_last_checkpoint
else:
    from mindformers.trainer.utils import get_last_checkpoint

logger = logging.get_logger(__name__)

_GLOBAL_ARGS = None


def get_args():
    """Return the module-global parsed arguments.

    Raises:
        ValueError: If ``initialize_openmind`` has not been called yet.
    """
    args = _GLOBAL_ARGS
    if args is None:
        raise ValueError("args is not initialized.")
    return args


def initialize_openmind(yaml_path=None, ignore_unknown_args=False, **kwargs):
    """Parse arguments once and publish them as the module-global namespace.

    Args:
        yaml_path: Optional path to a YAML configuration file.
        ignore_unknown_args: Tolerate unrecognized command-line flags.
        **kwargs: Direct argument overrides forwarded as ``custom_args``.
    """
    global _GLOBAL_ARGS
    _GLOBAL_ARGS = parse_args(yaml_path, ignore_unknown_args, custom_args=kwargs)


def parse_args(yaml_path=None, ignore_unknown_args=False, custom_args=None):
    """Parse all arguments.

    Exactly one argument source wins, tried in this priority order:
    ``custom_args`` (keyword overrides), ``yaml_path`` (YAML config file),
    then the command line (``sys.argv``).

    Args:
        yaml_path: Optional path to a YAML file holding argument values.
        ignore_unknown_args: When parsing the command line (no yaml/custom
            args), collect unknown flags via ``parse_known_args`` instead of
            erroring out.
        custom_args: Optional mapping of argument name -> value; an unknown
            key raises ValueError.

    Returns:
        The validated ``argparse.Namespace`` (also augmented with
        ``hf_seq2seq_args`` by ``add_special_args``).
    """
    parser = argparse.ArgumentParser(description="openMind Arguments")

    parser = _add_data_args(parser)
    parser = _add_model_args(parser)
    parser = _add_stage_args(parser)
    parser = _add_lora_args(parser)
    parser = _add_train_args(parser)
    parser = _add_generation_args(parser)
    parser = _add_eval_args(parser)
    parser = _add_legacy_args(parser)
    parser = _add_deploy_args(parser)

    unknown_args = None
    if custom_args:
        # Start from the parser defaults, then overlay the caller's overrides.
        args = parser.parse_args([])
        for key, value in custom_args.items():
            if hasattr(args, key):
                setattr(args, key, value)
            else:
                raise ValueError(f"Unknown argument: {key}")
    elif yaml_path is not None:
        # Drop CLI arguments so they cannot conflict with the YAML values.
        sys.argv = sys.argv[:1]
        known_args = parse_yaml_file(parser, yaml_path)
        namespace = argparse.Namespace(**known_args)
        args = parser.parse_args(namespace=namespace)
    elif not ignore_unknown_args:
        # NOTE(review): presumably removes the subcommand inserted by the
        # `openmind-cli` entry point before argparse sees it — confirm.
        if sys.argv[0].endswith("openmind-cli"):
            sys.argv.pop(1)
        args = parser.parse_args()
    else:
        args, unknown_args = parser.parse_known_args()

    _migrate_legacy_args(parser, vars(args), unknown_args)
    validate_args(args)
    add_special_args(args)
    return args


def parse_yaml_file(parser, yaml_path):
    """Parse and check yaml arguments.

    Args:
        parser: The fully-built argparse parser; each action's ``dest`` forms
            the whitelist of accepted argument names.
        yaml_path: Path to the YAML configuration file.

    Returns:
        dict mapping recognized argument names to their YAML values.

    Raises:
        ValueError: If the YAML file defines keys the parser does not know.
    """
    with open(yaml_path, "r", encoding="utf-8") as f:
        # safe_load returns None for an empty document; normalize to an empty
        # dict so an empty config means "all defaults" instead of crashing on
        # None.items().
        yaml_args = yaml.safe_load(f) or {}

    defined_params = {action.dest for action in parser._actions}
    known_args = {}
    extra_args = {}
    for key, value in yaml_args.items():
        (known_args if key in defined_params else extra_args)[key] = value

    if extra_args:
        raise ValueError(f"Currently {extra_args} is not supported.")

    return known_args


def add_special_args(args):
    """Attach a derived ``Seq2SeqTrainingArguments`` object to ``args``.

    The result is stored on ``args.hf_seq2seq_args``; it is None when the
    transformers library is unavailable.
    """
    if is_transformers_available():
        hf_parser = HfArgumentParser(Seq2SeqTrainingArguments)
        args.hf_seq2seq_args = hf_parser.parse_dict(vars(args), allow_extra_keys=True)[0]
    else:
        args.hf_seq2seq_args = None


def _numeric_version(version):
    """Return a tuple of the leading numeric components of a version string.

    Lexicographic string comparison mis-orders versions (e.g. "4.9.0" would
    compare as newer than "4.45.0"); integer tuples compare correctly.
    Non-numeric suffixes such as ".dev0" are ignored.
    """
    components = []
    for piece in version.split(".")[:3]:
        digits = ""
        for ch in piece:
            if not ch.isdigit():
                break
            digits += ch
        if not digits:
            break
        components.append(int(digits))
    return tuple(components)


def validate_args(args):
    """Sanity-check and normalize the parsed arguments in place.

    Resolves checkpoint resumption, device parsing, precision flags and
    stage/finetuning-type constraints.

    Raises:
        ValueError: On conflicting or out-of-range argument combinations.
        RuntimeError: When 4-bit loading is requested without bitsandbytes.
        ModuleNotFoundError: When swanlab tracking is requested but missing.
    """

    # Detecting last checkpoint
    if os.path.isdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(args.output_dir)
        if last_checkpoint is None and len(os.listdir(args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({args.output_dir}) already exists and is not empty. "
                "Set overwrite_output_dir true to overcome."
            )
        elif last_checkpoint is not None and args.resume_from_checkpoint is None:
            args.resume_from_checkpoint = last_checkpoint
            logger.info_rank0(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `output_dir` or set `overwrite_output_dir` true to train from scratch."
            )

    if args.use_gradient_checkpointing:
        # When gradient checkpointing was enabled, ddp_find_unused_parameters need to be False.
        # According to: https://github.com/huggingface/peft/issues/313#issuecomment-1517391550
        args.ddp_find_unused_parameters = False

    if getattr(args, "report_to", None) is not None:
        if "swanlab" in args.report_to:
            if not is_swanlab_available():
                raise ModuleNotFoundError(
                    "swanlab module is not installed. Please install it using 'pip install swanlab'."
                )
        elif bool(args.report_to):
            logger.warning_rank0(
                "It is recommended to use swanlab to track experiment, "
                "and other tools have not been fully validated in openMind."
            )

    # resolve conflicts of model_id and model_name_or_path
    if args.model_id and args.model_name_or_path:
        raise ValueError(
            "The parameters 'model_id' and 'model_name_or_path' cannot be passed simultaneously. Please choose one to provide."
        )

    if args.split_special_tokens and args.use_fast_tokenizer:
        raise ValueError("`split_special_tokens` is only supported for slow tokenizers.")

    if args.new_special_tokens is not None:  # support multiple special tokens
        args.new_special_tokens = [token.strip() for token in args.new_special_tokens.split(",")]

    if args.load_in_4bit:
        # importlib.metadata.metadata() raises PackageNotFoundError instead of
        # returning a falsy value, so probe the fallback package defensively
        # and surface one clear error when neither backend is installed.
        if not importlib.util.find_spec("bitsandbytes"):
            try:
                importlib.metadata.metadata("bitsandbytes-npu-beta")
            except importlib.metadata.PackageNotFoundError:
                raise RuntimeError("Please install bitsandbytes first before quantifying model.") from None

        # Compare numeric version tuples; a plain string comparison would
        # treat e.g. "4.9.0" as newer than "4.45.0".
        if _numeric_version(transformers.__version__) < (4, 45, 0):
            raise ValueError("The version of transformers is required at least 4.45.0 to run quantization.")

    # stage and finetune type
    if args.stage not in [Stages.SFT, Stages.PT]:
        raise ValueError(f"Currently supported stage list  is [{Stages.SFT, Stages.PT}]")
    if args.finetuning_type not in [FinetuneType.FULL, FinetuneType.LORA]:
        raise ValueError(f"Currently supported fine-tuning method list is [{FinetuneType.FULL}, {FinetuneType.LORA}]")
    if args.finetuning_type != FinetuneType.LORA and args.use_dora:
        raise ValueError("`use_dora` is only valid for LoRA training, please set `finetuning_type` to `lora`.")

    if args.cutoff_len <= 0:
        raise ValueError(
            f"cutoff_len must be a positive number. Received value: {args.cutoff_len}."
            "Please ensure that cutoff_len is greater than 0."
        )

    if args.max_length is not None and args.do_train:
        if args.max_length < args.cutoff_len:
            logger.warning_rank0(
                f"Set max_length to {args.cutoff_len} to ensure it meets or exceeds the value of cutoff_len."
            )
            args.max_length = args.cutoff_len
    elif args.max_length is None and not args.do_train:
        args.max_length = 1024

    # When ASCEND_RT_VISIBLE_DEVICES is set to "5,6", the process will recognize devices 5 and 6 as device 0 and 1 during runtime
    # set args.device to str as device: 0 in yaml file will be parsed as int

    limited_devices = os.getenv("ASCEND_RT_VISIBLE_DEVICES", None)
    if limited_devices is not None:
        logger.info_rank0(
            f"environment param ASCEND_RT_VISIBLE_DEVICES is set to {limited_devices}, and param device will be ignored."
        )
    else:
        if args.device is not None:
            if args.device == "cpu":
                pass
            # 0->[0], "npu:0"->[0], "0, npu:1"->[0, 1], "NPU:2, 3"->[2, 3]
            else:
                args.device = [
                    int(part.strip().lower().split("npu:")[-1].strip()) for part in str(args.device).split(",")
                ]
                if len(args.device) == 1:
                    args.device = args.device[0]
        else:
            args.device = 0

    if args.bf16 and args.fp16:
        raise ValueError(
            "Cannot enable both BF16 and FP16 precision modes simultaneously. "
            "Please choose either --bf16 or --fp16, not both."
        )


def _add_data_args(parser):
    """Register dataset loading and preprocessing arguments on ``parser``."""
    group = parser.add_argument_group(title="data")

    group.add_argument("--dataset", type=str, default=None, help="The name of the dataset.")
    group.add_argument("--eval_dataset", type=str, default=None, help="The name of dataset(s) to use for evaluation.")
    group.add_argument(
        "--custom_dataset_info",
        type=str,
        default=None,
        # Adjacent string literals concatenate; each piece ends with a space
        # so the rendered help text stays readable.
        help="The absolute path of the customized JSON file, "
        "externally inputted by the user. "
        "The format for supporting multiple datasets is as follows: "
        "train_1,train_2.",
    )
    group.add_argument(
        "--subset_name",
        type=str,
        default=None,
        help="Name of the sub-dataset in the dataset. When it is necessary "
        "to specify the download of one of multiple sub-datasets.",
    )
    group.add_argument(
        "--split",
        type=str,
        default="train",
        help="Load a subset of the partitioned dataset, such as 'train'. "
        "If the value is None, it will return all the data included.",
    )
    group.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the data processing.",
    )
    group.add_argument(
        "--preprocessing_batch_size",
        type=int,
        default=1000,
        help="The number of examples in one group in pre-processing.",
    )
    group.add_argument(
        "--cutoff_len", type=int, default=1024, help="The cutoff length of the tokenized inputs in the dataset."
    )
    group.add_argument(
        "--max_length",
        type=int,
        default=None,
        help="If `do_train=True`, pad to a maximum length specified with the argument `max_length`, when `cutoff_len` is set, the maximum length equals `max(cutoff_len, max_length)`. "
        "If `do_train=False`, this specifies the maximum length of generated tokens.",
    )
    group.add_argument(
        "--reserved_label_len",
        type=int,
        default=1,
        help="The minimum cutoff length reserved for the tokenized labels in the dataset.",
    )
    group.add_argument(
        "--ignore_pad_token_for_loss",
        type=str2bool,
        default=True,
        help="Whether or not to ignore the tokens corresponding to the pad label in loss computation.",
    )
    group.add_argument("--packing", type=str2bool, default=False, help="Enable sequences packing in training.")
    group.add_argument("--default_system", type=str, default=None, help="The default system of template to use.")
    group.add_argument("--tool_format", type=str, default=None, help="The tool format of template to use.")

    return parser


def _add_model_args(parser):
    """Register model identification, loading and tokenizer arguments on ``parser``."""
    group = parser.add_argument_group(title="model")

    group.add_argument(
        "--model_id",
        type=str,
        default=None,
        help="Used to specify the id for the model, such as 'telechat-7b-pt', 'llama3-7b-chat'. "
        "If this parameter is not specified, try to query the registered template through model_name_or_path. "
        "If neither is available, use the common model template.",
    )
    group.add_argument(
        "--model_name_or_path",
        type=str,
        default=None,
        help="The local path of the model or its name in the hub, "
        "such as /home/models/Telechat-7B-pt or TeleAI/Telechat-7B-pt",
    )
    group.add_argument(
        "--template",
        type=str,
        default=None,
        help="The template of model applied to datasets, such as 'qwen2', 'llama'.",
    )
    group.add_argument(
        "--load_in_4bit", type=str2bool, default=False, help="Support for QLoRA, load the model in 4bits precision"
    )
    group.add_argument(
        "--trust_remote_code",
        type=str2bool,
        default=False,
        help="Whether to trust the execution of code from datasets/models defined on the Hub. "
        "This option should only be set to `True` for repositories you trust and in which you have read the code, "
        "as it will execute code present on the Hub on your local machine.",
    )
    group.add_argument("--cache_dir", type=str, default=None, help="Cache directory of downloaded models")
    group.add_argument(
        "--model_revision",
        type=str,
        default="main",
        help="The specific model version to use (can be a branch name, tag name or commit id).",
    )
    group.add_argument(
        "--use_fast_tokenizer",
        type=str2bool,
        default=True,
        help="Whether or not to use one of the fast tokenizer (backed by the tokenizers library).",
    )
    group.add_argument(
        "--split_special_tokens",
        type=str2bool,
        default=False,
        help="Whether or not the special tokens should be split during the tokenization process.",
    )
    group.add_argument(
        "--new_special_tokens",
        type=str,
        default=None,
        help="Special tokens to be added into the tokenizer. Use commas to separate multiple tokens.",
    )
    group.add_argument(
        "--resize_vocab", type=str2bool, default=False, help="Whether or not to resize the tokenizer vocab."
    )
    group.add_argument(
        "--use_gradient_checkpointing",
        type=str2bool,
        default=True,
        help="Whether or not to use gradient checkpointing.",
    )
    group.add_argument("--adapter_name_or_path", type=str, default=None, help="The list of the adapter model to use.")
    group.add_argument("--per_shard_size", type=int, default=None, help="Maximum size of each safetensors file.")
    group.add_argument(
        "--token", type=str, default=None, help="The modelers.cn token to download model from private repo."
    )
    # NPU fused-kernel switches; all enabled by default.
    group.add_argument("--use_npu_fusion_attention", type=str2bool, default=True, help="Use npu fusion attention.")
    group.add_argument("--use_fused_rms_norm", type=str2bool, default=True, help="Use npu fused RMSNorm.")
    group.add_argument("--use_fused_rope", type=str2bool, default=True, help="Use npu fused RoPE.")
    group.add_argument("--use_fused_swiglu", type=str2bool, default=True, help="Use npu fused SwiGLU.")
    group.add_argument("--load_checkpoint", type=str2bool, default=False, help="Whether load checkpoint.")
    group.add_argument("--ckpt_path", default="./ckpt/checkpoints_dpo", help="Checkpoint_path.")
    group.add_argument("--device", type=str, default=None, help="Specify a device to run generation.")
    group.add_argument(
        "--low_cpu_mem_usage",
        type=str2bool,
        default=True,
        help="Tries not to use more than 1x model size in CPU memory (including peak memory) while loading the model",
    )
    group.add_argument("--print_param_status", type=str2bool, default=False, help="Print model parameters status.")
    group.add_argument("--offload_folder", type=str, default=None, help="Path to offload model weights.")

    return parser


def _add_stage_args(parser):
    """Register training-stage selection arguments (stage, framework) on ``parser``."""
    group = parser.add_argument_group(title="stage")

    group.add_argument(
        "--stage",
        default=Stages.SFT,
        # Single literal: the original adjacent-literal concatenation lost the
        # space between "training," and "currently".
        help="Which stage will be used in training, currently only 'pt' and 'sft' are supported",
    )
    group.add_argument("--framework", type=str, default=Frameworks.pt, help="Which framework will be used.")

    return parser


def _add_lora_args(parser):
    """Register LoRA / DoRA fine-tuning arguments on ``parser``."""
    lora_group = parser.add_argument_group(title="lora")

    lora_group.add_argument("--finetuning_type", default=FinetuneType.FULL, help="Which method to use.")
    lora_group.add_argument(
        "--lora_target_modules",
        type=str,
        default=None,
        help=(
            "The target modules for LoRA fine-tuning. If you use model_id, it will use default lora_target_modules. "
            "If you set to 'all', it will use all possible target modules in the model. "
            "If you set specific values, such as 'q_proj, v_proj', it will be applied to models."
        ),
    )
    lora_group.add_argument(
        "--lora_alpha",
        type=int,
        default=None,
        help="The scale factor for LoRA fine-tuning (default: lora_rank * 2).",
    )
    lora_group.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout rate for the LoRA fine-tuning.")
    lora_group.add_argument("--lora_rank", type=int, default=8, help="The intrinsic dimension for LoRA fine-tuning.")
    lora_group.add_argument(
        "--use_dora",
        default=False,
        type=str2bool,
        help="Whether or not to use the weight-decomposed lora method (DoRA).",
    )

    return parser


def _add_train_args(parser):
    """Register training hyper-parameter arguments on ``parser``.

    Argument names deliberately mirror transformers' TrainingArguments and
    must not be renamed: they are forwarded into Seq2SeqTrainingArguments.
    """
    group = parser.add_argument_group(title="training")

    # following are transformers args, do not rename
    group.add_argument(
        "--output_dir",
        type=str,
        default="./saves",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    group.add_argument(
        "--overwrite_output_dir",
        type=str2bool,
        default=False,
        help="Overwrite the content of the output directory. "
        "Use this to continue training if output_dir points to a checkpoint directory.",
    )
    group.add_argument("--do_train", type=str2bool, default=False, help="Whether to run training.")
    group.add_argument("--do_eval", type=str2bool, default=False, help="Whether to run eval on the dev set.")
    group.add_argument("--do_predict", type=str2bool, default=False, help="Whether to run predictions on the test set.")
    group.add_argument("--eval_strategy", type=str, default="no", help="The evaluation strategy to use.")
    group.add_argument(
        "--eval_steps",
        type=float,
        default=None,
        help="Number of update steps between two evaluations if eval_strategy='steps'.",
    )
    group.add_argument(
        "--per_device_train_batch_size", type=int, default=8, help="Batch size per GPU/TPU core/CPU for training."
    )
    group.add_argument(
        "--per_device_eval_batch_size", type=int, default=8, help="Batch size per GPU/TPU core/CPU for evaluation."
    )
    group.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    group.add_argument("--learning_rate", type=float, default=5e-5, help="The initial learning rate for Adam.")
    group.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay if we apply some.")
    group.add_argument("--adam_beta1", type=float, default=0.9, help="Beta1 for Adam optimizer.")
    group.add_argument("--adam_beta2", type=float, default=0.999, help="Beta2 for Adam optimizer.")
    group.add_argument("--adam_epsilon", type=float, default=1e-8, help="Epsilon for Adam optimizer.")
    group.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm.")
    group.add_argument(
        "--num_train_epochs", type=float, default=3.0, help="Total number of training epochs to perform."
    )
    group.add_argument(
        "--max_steps",
        type=int,
        default=-1,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    group.add_argument("--warmup_steps", type=int, default=0, help="Linear warmup over warmup_steps.")
    group.add_argument(
        "--warmup_ratio",
        type=float,
        default=0.0,
        help="Ratio of total training steps used for a linear warmup from 0 to `learning_rate`.",
    )
    group.add_argument("--lr_scheduler_type", type=str, default="linear", help="The scheduler type to use")
    group.add_argument("--logging_dir", type=str, default=None, help="Tensorboard log dir.")
    group.add_argument("--logging_first_step", type=str2bool, default=False, help="Log the first global_step.")
    group.add_argument("--logging_steps", type=int, default=1, help="Log every X updates steps.")
    group.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    group.add_argument("--seed", type=int, default=1234, help="Random seed for initialization.")
    group.add_argument(
        "--fp16",
        type=str2bool,
        default=False,
        help="Whether to use 16-bit (mixed) precision (through NVIDIA Apex) instead of 32-bit.",
    )
    group.add_argument(
        "--bf16",
        type=str2bool,
        default=False,
        help="Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training.",
    )
    group.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank.")
    group.add_argument(
        "--deepspeed",
        type=str,
        default=None,
        help="Enable deepspeed and pass the path to deepspeed json config file (e.g. ds_config.json).",
    )
    group.add_argument(
        "--report_to", default=None, help="Whether to enable training tracking, currently only supported by SwanLab."
    )
    group.add_argument("--local_process_index", default=None, help="local_process_index")
    group.add_argument(
        "--resume_from_checkpoint", default=None, help="The path to a folder with a valid checkpoint for your model."
    )
    group.add_argument(
        "--ddp_timeout",
        type=int,
        default=1800,
        help="Overrides the default timeout for distributed training (value should be given in seconds).",
    )
    group.add_argument(
        "--ddp_find_unused_parameters",
        type=str2bool,
        default=True,
        help="When using distributed training, the value of the flag find_unused_parameters passed to DistributedDataParallel. Will default to False if gradient checkpointing is used, True otherwise.",
    )
    group.add_argument("--save_strategy", default="no", help="The checkpoint save strategy to use.")

    return parser


def _add_generation_args(parser):
    """Register text-generation (decoding) arguments on ``parser``."""
    gen_group = parser.add_argument_group(title="generation")

    gen_group.add_argument("--backend", type=str, default=None, help="Specify a backend to run generation.")
    gen_group.add_argument(
        "--do_sample",
        default=True,
        type=str2bool,
        help="Whether or not to use sampling, use greedy decoding otherwise.",
    )
    gen_group.add_argument(
        "--temperature",
        type=float,
        default=0.95,
        help="The value used to modulate the next token probabilities.",
    )
    gen_group.add_argument(
        "--top_p",
        type=float,
        default=0.7,
        help="The smallest set of most probable tokens with probabilities that add up to top_p or higher are kept.",
    )
    gen_group.add_argument(
        "--top_k",
        type=int,
        default=50,
        help="The number of highest probability vocabulary tokens to keep for top-k filtering.",
    )
    gen_group.add_argument(
        "--num_beams",
        type=int,
        default=1,
        help="Number of beams for beam search. 1 means no beam search.",
    )
    gen_group.add_argument(
        "--max_new_tokens",
        type=int,
        default=1024,
        help="The maximum number of tokens to generate, ignoring the number of tokens in the prompt.",
    )
    gen_group.add_argument(
        "--repetition_penalty",
        type=float,
        default=1.0,
        help="The parameter for repetition penalty. 1.0 means no penalty.",
    )
    gen_group.add_argument(
        "--length_penalty",
        type=float,
        default=1.0,
        help="Exponential penalty to the length that is used with beam-based generation.",
    )
    gen_group.add_argument(
        "--skip_special_tokens",
        default=True,
        type=str2bool,
        help="Whether or not to remove special tokens in the decoding.",
    )
    gen_group.add_argument(
        "--use_cache",
        default=True,
        type=str2bool,
        help="Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.",
    )

    return parser


def _add_eval_args(parser):
    group = parser.add_argument_group(title="eval")

    group.add_argument("--batch_size", type=int, help="The batch_size to do evaluation.")
    group.add_argument(
        "--limit",
        type=int,
        help="Allowed number of samples used by each task. This parameter is used only to limit the number of samples to reduce the evaluation time and verify whether the function is normal. The model capability cannot be evaluated.",
    )
    group.add_argument("--tasks", type=str, help="Tasks to be evaluated. Usage method: --tasks task1,task2.")

    return parser


def _add_deploy_args(parser):
    group = parser.add_argument_group(title="deploy")
    group.add_argument(
        "--port",
        type=int,
        default=1025,
        help="port for the service-oriented deployment",
    )
    group.add_argument(
        "--world_size",
        default=1,
        type=int,
        help="npu world size",
    )
    return parser
