# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

from typing import List, Optional

import torch.distributed as dist
from transformers import TrainerCallback

from openmind.utils import get_logger
from openmind.utils.constants import IGNORE_INDEX
from openmind.flow.model import get_model, get_tokenizer_and_processor
from openmind.flow.model.sequence_parallel.seq_utils import DistributedTrainingModule
from openmind.flow.datasets import get_template, get_dataset_module
from openmind.flow.arguments import get_args
from openmind.flow.datasets.collator import SFTDataCollatorWith4DAttentionMask
from openmind.flow.train.sft.seq_utils import CustomSeq2SeqTrainer

logger = get_logger(__name__)


def run_sft(
    callbacks: Optional[List["TrainerCallback"]] = None,
):
    """Run a supervised fine-tuning (SFT) job end to end.

    Builds the tokenizer/processor, model, chat template and datasets from the
    global arguments, wires them into a ``CustomSeq2SeqTrainer``, and — when
    ``args.do_train`` is set — trains, saves the model, and records metrics.

    Args:
        callbacks: Optional extra ``TrainerCallback`` instances forwarded to
            the trainer.
    """
    args = get_args()
    sp_size = args.sequence_parallel_size

    if sp_size > 1:
        # Sequence parallelism splits each sample across ranks, so the
        # process groups must exist before the model/data pipeline is built.
        DistributedTrainingModule.initialize_sequence_parallel(
            dist.get_world_size(), dist.get_rank(), sp_size
        )

    tokenizer, processor = get_tokenizer_and_processor()
    model = get_model()
    template = get_template()
    dataset_module = get_dataset_module(tokenizer, template, processor)

    # Under sequence parallelism each rank holds only 1/sp_size of a sequence.
    # NOTE(review): assumes args.max_length is set whenever sp_size > 1 —
    # otherwise the integer division would fail; confirm upstream validation.
    if sp_size == 1:
        per_rank_max_length = args.max_length
    else:
        per_rank_max_length = args.max_length // sp_size

    if args.ignore_pad_token_for_loss:
        label_pad = IGNORE_INDEX
    else:
        label_pad = tokenizer.pad_token_id

    data_collator = SFTDataCollatorWith4DAttentionMask(
        template=template,
        tokenizer=tokenizer,
        model=model,
        processor=processor,
        # Fixed-length padding only when an explicit max length is configured.
        padding="max_length" if args.max_length else True,
        # Multiples of 8 keep tensor shapes friendly to accelerator kernels.
        pad_to_multiple_of=8 if args.do_train else None,
        max_length=per_rank_max_length,
        label_pad_token_id=label_pad,
        require_position_ids=sp_size > 1,
    )

    trainer = CustomSeq2SeqTrainer(
        model=model,
        args=args.hf_seq2seq_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=callbacks,
        **dataset_module,
    )

    if not args.do_train:
        return

    logger.info_rank0("Start training.")
    train_result = trainer.train(resume_from_checkpoint=args.resume_from_checkpoint)
    trainer.save_model()
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)
    trainer.save_state()
