# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.


from typing import List, Optional

from transformers import TrainerCallback

from trl import DPOTrainer

from openmind.utils import get_logger, is_peft_available
from openmind.utils.constants import FinetuneType
from openmind.flow.model import get_model, get_tokenizer
from openmind.flow.datasets import get_template, get_dataset_module, fix_tokenizer_with_template
from openmind.flow.arguments import get_args

logger = get_logger(__name__)

if is_peft_available():
    from peft import PeftConfig, LoraConfig


def get_peft_config() -> "Optional[PeftConfig]":
    """Build the LoRA ``PeftConfig`` described by the global arguments.

    Returns:
        A ``LoraConfig`` populated from the parsed arguments when the
        finetuning type is LoRA, otherwise ``None``.

    Raises:
        ValueError: if LoRA finetuning is requested but ``peft`` is not
            installed in the environment.
    """
    args = get_args()

    # Only LoRA finetuning needs a peft configuration at all.
    if args.finetuning_type != FinetuneType.LORA:
        return None

    if not is_peft_available():
        raise ValueError(
            "You need to have PEFT library installed in your environment, make sure to install `peft`. "
            "Make sure to run `pip install -U peft`."
        )

    return LoraConfig(
        task_type="CAUSAL_LM",
        r=args.lora_rank,
        target_modules=args.lora_target_modules,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        use_dora=args.use_dora,
    )


def run_dpo(
    callbacks: Optional[List["TrainerCallback"]] = None,
):
    """Run DPO (Direct Preference Optimization) finetuning.

    Prepares the tokenizer, chat template and preference dataset, builds a
    ``DPOTrainer`` (with an optional LoRA config from ``get_peft_config``),
    and, when ``args.do_train`` is set, trains and saves the model, metrics
    and trainer state to ``args.output_dir``.

    Args:
        callbacks: Optional list of ``TrainerCallback`` instances forwarded
            to the ``DPOTrainer``.
    """
    tokenizer = get_tokenizer()
    template = get_template()
    fix_tokenizer_with_template(tokenizer, template)
    dataset_module = get_dataset_module(tokenizer, template)

    args = get_args()
    peft_config = get_peft_config()
    model = get_model()

    logger.info_rank0(f"*******DPO Args: {args.dpo_args} ***********")

    # When a peft (LoRA) config is provided, the trainer derives the frozen
    # reference model internally, so ref_model must be None; otherwise load
    # a second full copy of the model to serve as the reference.
    if args.finetuning_type == FinetuneType.LORA:
        ref_model = None
    else:
        ref_model = get_model()

    trainer = DPOTrainer(
        args=args.dpo_args,
        processing_class=tokenizer,
        model=model,
        ref_model=ref_model,
        peft_config=peft_config,
        # Bug fix: callbacks were previously accepted but never forwarded,
        # so caller-supplied TrainerCallbacks were silently ignored.
        callbacks=callbacks,
        **dataset_module,
    )

    if args.do_train:
        logger.info_rank0("Start DPO training.")
        train_result = trainer.train()
        trainer.save_model(args.output_dir)
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()