# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

import os
import json
from functools import partial
from typing import Optional, List, Dict, Literal

import numpy as np
from datasets import Dataset
from transformers import AutoTokenizer, ProcessorMixin
from openmind.utils import get_logger
from openmind.utils.constants import DATASET_INFO_CONFIG, Stages
from openmind.flow.datasets.preprocess import (
    align_dataset,
    merge_datasets,
    preprocess_supervised_dataset,
    preprocess_pretrain_dataset,
    preprocess_pairwise_dataset,
    preprocess_reward_dataset,
)
from openmind.flow.arguments import get_args
from openmind.flow.datasets.template import Template
from openmind.flow.datasets.parser import get_dataset_attr
from openmind.flow.datasets.preprocess.sequence_parallel import pad_sequence, sp_split
from openmind.utils.loader_utils import get_platform_loader

logger = get_logger(__name__)


class DatasetFormatConfig:
    """Column contract for one supported dataset format.

    A dataset matches the format when it contains every required column and
    introduces no column outside the allowed set.
    """

    def __init__(self, required_columns: List[str], allowed_columns: List[str]):
        # Columns that must all be present for the format to match.
        self.required_columns = required_columns
        # Superset of columns the format tolerates.
        self.allowed_columns = allowed_columns

    def is_valid(self, dataset_columns: List[str]):
        """Return True when ``dataset_columns`` satisfies this format's contract."""
        columns = set(dataset_columns)
        has_required = columns.issuperset(self.required_columns)
        only_allowed = columns.issubset(self.allowed_columns)
        return has_required and only_allowed


# Known dataset formats, checked in insertion order by _check_dataset_format.
# Each entry lists the columns a dataset must / may contain to be treated as
# that format.
DATASET_FORMAT_REGISTRY: Dict[str, DatasetFormatConfig] = {
    # Alpaca-style instruction tuning: single-turn instruction/response pairs.
    "alpaca": DatasetFormatConfig(
        required_columns=["instruction", "output"],
        allowed_columns=["instruction", "input", "output", "history", "system", "tools", "text"],
    ),
    # ShareGPT-style multi-turn conversations.
    "sharegpt": DatasetFormatConfig(
        required_columns=["conversations"], allowed_columns=["conversations", "system", "tools"]
    ),
    # Raw text corpus for pretraining.
    "text": DatasetFormatConfig(required_columns=["text"], allowed_columns=["text"]),
    # Preference pairs for reward modeling / DPO.
    "pairwise": DatasetFormatConfig(
        required_columns=["chosen", "rejected"],
        allowed_columns=["prompt", "chosen", "rejected", "response", "system", "tools"],
    ),
}


def _check_dataset_format(dataset_columns: List[str]) -> str:
    """Detect which registered format ``dataset_columns`` belongs to.

    Formats are checked in DATASET_FORMAT_REGISTRY insertion order and the
    first match wins.

    Returns:
        The matching format key, e.g. "alpaca" or "sharegpt".

    Raises:
        ValueError: if no registered format matches the columns.
    """
    for format_type, format_config in DATASET_FORMAT_REGISTRY.items():
        if format_config.is_valid(dataset_columns):
            return format_type
    # Include the offending columns so users can see why detection failed.
    raise ValueError(
        "The format of your dataset does not comply with the specified requirements for the custom "
        "dataset format. Got columns: {}.".format(dataset_columns)
    )


def _get_merged_datasets(dataset_names: Optional[str]):
    """Load, optionally subsample, align and merge the named datasets.

    Args:
        dataset_names: comma-separated dataset keys, resolved against the
            built-in dataset-info config and any files given via
            ``args.custom_dataset_info``.

    Returns:
        The merged aligned dataset, or None when ``dataset_names`` is None.
    """
    args = get_args()
    if dataset_names is None:
        return None
    current_path = os.path.dirname(os.path.abspath(__file__))
    dataset_info_path = os.path.join(current_path, "../configs", DATASET_INFO_CONFIG)
    with open(dataset_info_path, "r", encoding="utf-8") as f:
        dataset_info = json.load(f)

    # Custom dataset-info files are applied in order; later files override
    # earlier entries (and the built-in config) on key collision.
    if args.custom_dataset_info is not None:
        custom_dataset_path_list = [
            custom_dataset_path.strip() for custom_dataset_path in args.custom_dataset_info.split(",")
        ]
        for custom_dataset_path in custom_dataset_path_list:
            with open(custom_dataset_path, "r", encoding="utf-8") as f:
                custom_dataset_info = json.load(f)
                dataset_info.update(custom_dataset_info)

    dataset_list = [dataset.strip() for dataset in dataset_names.split(",")]
    aligned_datasets = []
    for name in dataset_list:
        dataset_attr = get_dataset_attr(name, dataset_info)
        load_dataset_func, openmind_platform = get_platform_loader("dataset")

        # if dataset in dataset_info.json, dataset_attr.load_from = {"modelers":xxx, "huggingface":xxx};
        # pick the entry for the active platform. (isinstance against the plain
        # builtin `dict` — isinstance with typing.Dict is deprecated.)
        if isinstance(dataset_attr.load_from, dict):
            dataset_attr.load_from = dataset_attr.load_from[openmind_platform]

        dataset = load_dataset_func(
            path=dataset_attr.load_from,
            data_files=dataset_attr.file_name,
            name=args.subset_name,
            split=dataset_attr.split,
        )

        # The columns of the original dataset can be deleted because only the tokenized data is saved.
        column_names = dataset.column_names
        logger.debug("Column_names to be deleted = {}".format(column_names))

        if dataset_attr.num_samples is not None:
            # Random subsample without replacement, capped at the dataset size.
            target_num = min(dataset_attr.num_samples, len(dataset))
            indexes = np.random.permutation(len(dataset))[:target_num]

            dataset = dataset.select(indexes)
            logger.info("Sampled {} examples from dataset {}.".format(target_num, dataset_attr))

        if dataset_attr.is_custom:
            # Custom datasets declare no format; infer it from the columns.
            dataset_attr.formatting = _check_dataset_format(column_names)

        aligned_datasets.append(align_dataset(dataset_attr, dataset))
    return merge_datasets(aligned_datasets)


def _get_preprocessed_dataset(
    dataset: Dataset,
    template: Template,
    tokenizer: AutoTokenizer,
    processor: Optional[ProcessorMixin] = None,
):
    """Tokenize ``dataset`` with the stage-specific preprocessing function.

    For the RM stage the preprocess function consumes the dataset directly;
    for every other stage it is applied through ``Dataset.map`` and the
    original (raw-text) columns are removed.

    Returns:
        The tokenized dataset, or None when ``dataset`` is None.
    """
    args = get_args()

    if dataset is None:
        return None
    column_names = dataset.column_names
    preprocess_kwargs = dict(
        num_proc=args.preprocessing_num_workers,
        # Only the local main process (index 0) tokenizes from scratch;
        # the other ranks reuse its cache.
        load_from_cache_file=args.local_process_index != 0,
        desc="Start running tokenizer on datasets",
    )
    preprocess_func = _get_preprocess_func(template, tokenizer, processor)
    # Typo fix: "columes" -> "columns" in the log messages below.
    logger.info_rank0(f"\n******removed columns: {column_names} *********\n")
    if args.stage == Stages.RM:
        dataset = preprocess_func(dataset=dataset, args=args)
    else:
        dataset = dataset.map(
            preprocess_func,
            batched=True,
            batch_size=args.preprocessing_batch_size,
            remove_columns=column_names,
            **preprocess_kwargs,
        )
    logger.info_rank0(f"\n******processed new columns: {dataset.column_names} *********\n")
    # print datasets example applied template
    if args.stage in [Stages.SFT, Stages.PT]:
        logger.info_rank0("\ninput:\n{}".format(tokenizer.decode(dataset["input_ids"][0])))
        logger.info_rank0("\ninput_ids:\n{}\n".format(dataset["input_ids"][0]))
    if args.stage == Stages.RM:
        logger.info_rank0("\nchosen input:\n{}".format(dataset["chosen"][0]))
        logger.info_rank0("\nrejected input:\n{}".format(dataset["rejected"][0]))
    return dataset


def _get_preprocess_func(template, tokenizer, processor):
    """Build the tokenization callable for the current training stage.

    Raises:
        NotImplementedError: if ``args.stage`` has no preprocessing routine.
    """
    args = get_args()
    stage = args.stage

    if stage == Stages.PT:
        return partial(preprocess_pretrain_dataset, tokenizer=tokenizer)
    if stage == Stages.SFT:
        return partial(
            preprocess_supervised_dataset, template=template, tokenizer=tokenizer, processor=processor
        )
    if stage == Stages.DPO:
        return partial(
            preprocess_pairwise_dataset, template=template, tokenizer=tokenizer, cutoff_len=args.cutoff_len
        )
    if stage == Stages.RM:
        return partial(preprocess_reward_dataset, tokenizer=tokenizer)
    raise NotImplementedError


def _get_sequence_parallel_dataset(
    dataset: Dataset,
    tokenizer: AutoTokenizer,
):
    """Prepare ``dataset`` for sequence parallelism: pad, then split.

    Returns None when ``dataset`` is None.
    """
    args = get_args()

    if dataset is None:
        return None

    # Two passes over the dataset: first pad every sequence, then split the
    # padded sequences across sequence-parallel ranks.
    result = dataset
    for stage, desc in (
        ("pad", "Running padding split on dataset"),
        ("split", "Running sequence parallel split on dataset"),
    ):
        map_func = _get_sequence_parallel_func(stage=stage, tokenizer=tokenizer)
        result = result.map(
            map_func,
            batched=True,
            batch_size=args.preprocessing_batch_size,
            num_proc=args.preprocessing_num_workers,
            load_from_cache_file=args.local_process_index != 0,
            desc=desc,
        )
    return result


def _get_sequence_parallel_func(
    stage: Literal["pad", "split"],
    tokenizer: AutoTokenizer,
):
    """Return the map callable for one sequence-parallel preprocessing stage.

    Args:
        stage: "pad" pads the sequences; "split" slices them across ranks.
        tokenizer: tokenizer used by the padding stage (unused for "split").

    Raises:
        ValueError: if ``stage`` is neither "pad" nor "split".
    """
    args = get_args()

    if stage == "pad":
        return partial(pad_sequence, args=args, tokenizer=tokenizer)
    if stage == "split":
        return partial(sp_split, args=args)
    # Previously an unknown stage fell through and crashed with
    # UnboundLocalError at the return statement; fail explicitly instead.
    raise ValueError("Unknown sequence parallel stage: {}".format(stage))


def get_dataset_module(
    tokenizer: AutoTokenizer,
    template: Template,
    processor: Optional[ProcessorMixin] = None,
):
    """Build the train/eval datasets: load + merge, tokenize, and (when
    sequence parallelism is enabled) pad/split for sequence-parallel ranks.

    Returns:
        dict with keys "train_dataset" and "eval_dataset"; either value may
        be None when the corresponding dataset argument is unset.
    """
    args = get_args()

    # main_process_first: rank 0 downloads/caches first, other ranks reuse it.
    with args.hf_seq2seq_args.main_process_first(desc="load dataset"):
        train_dataset = _get_merged_datasets(args.dataset)
        eval_dataset = _get_merged_datasets(args.eval_dataset)

        logger.debug("Finish load data, train_dataset = {}, eval_dataset = {}".format(train_dataset, eval_dataset))

    with args.hf_seq2seq_args.main_process_first(desc="preprocess dataset"):
        train_dataset = _get_preprocessed_dataset(train_dataset, template, tokenizer, processor)
        eval_dataset = _get_preprocessed_dataset(eval_dataset, template, tokenizer, processor)

    # Sequence-parallel preprocessing only applies when sharding sequences
    # across more than one rank.
    if args.sequence_parallel_size > 1:
        with args.hf_seq2seq_args.main_process_first(desc="preprocess dataset"):
            train_dataset = _get_sequence_parallel_dataset(train_dataset, tokenizer)
            eval_dataset = _get_sequence_parallel_dataset(eval_dataset, tokenizer)

    dataset_module = {"train_dataset": train_dataset, "eval_dataset": eval_dataset}

    return dataset_module
