# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
# https://github.com/hiyouga/LLaMA-Factory/blob/main/src/llamafactory/data/aligner.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import TypedDict, List, Optional, Union, Any, Sequence
from functools import partial

from datasets import concatenate_datasets

from openmind.flow.datasets.parser import InstructionDatasetAttr
from openmind.flow.arguments import get_args
from openmind.utils import logging

logger = logging.get_logger()


class ConversionOutput(TypedDict):
    """Standard batched column schema produced by the dataset converters.

    Every field is a batch-aligned list: index ``i`` of each list describes
    the same example.
    """

    # Conversation context per example: a list of {"role", "content"} messages
    # (history turns followed by the final user message).
    prompt: List[List[dict]]
    # Final assistant turn(s) per example; empty list for unsupervised rows.
    response: List[List[dict]]
    # Per-example system prompt; "" when the dataset has no system column.
    # (Was List[List[dict]], but the converters append plain strings.)
    system: List[str]
    # Per-example tools description; "" when the dataset has no tools column.
    tools: List[str]
    # Per-example media entries (list of paths) or None when the modality is absent.
    images: List
    videos: List
    audios: List


def convert_pairwise(examples, datasets_attr: InstructionDatasetAttr):
    """
    Convert a preference (chosen/rejected) dataset to the standard pairwise format.

    Args:
        examples: A batch of examples, mapping column names to per-example lists.
        datasets_attr: The attributes (column names) of the dataset; only
            ``prompt``, ``chosen`` and ``rejected`` are read here.

    Returns:
        A dict with "_prompt" and "_response" columns. "_prompt" holds one user
        message per example; "_response" holds a single [chosen, rejected] pair
        of assistant messages, or stays empty when either side is missing.
    """
    # NOTE(review): keys are "_prompt"/"_response" here, unlike the unprefixed
    # keys of the other converters, so this dict intentionally does NOT match
    # ConversionOutput -- confirm downstream consumers rely on these names.
    outputs = {"_prompt": [], "_response": []}

    for i in range(len(examples[datasets_attr.prompt])):
        prompt = []
        content = []
        response = []

        if examples[datasets_attr.prompt][i]:
            content.append(examples[datasets_attr.prompt][i])

        prompt.append({"role": "user", "content": "\n".join(content)})

        # Keep the example only when both preference sides are present.
        if examples[datasets_attr.chosen][i] and examples[datasets_attr.rejected][i]:
            response.append(
                [
                    {"role": "assistant", "content": examples[datasets_attr.chosen][i]},
                    {"role": "assistant", "content": examples[datasets_attr.rejected][i]},
                ]
            )

        outputs["_prompt"].append(prompt)
        outputs["_response"].append(response)

    return outputs


def _build_history_prompt(history_data):
    prompt = []
    if history_data and isinstance(history_data, list):
        for history_item in history_data:
            prompt.append({"role": "user", "content": history_item[0]})
            prompt.append({"role": "assistant", "content": history_item[1]})
    return prompt


def convert_alpaca(examples, datasets_attr: InstructionDatasetAttr, convert_system=False, convert_tools=False):
    """
    Convert an alpaca-format dataset to the standard format.

    Args:
        examples: A batch of examples, mapping column names to per-example lists.
        datasets_attr: The attributes (column names) of the dataset.
        convert_system: Whether to read the system column.
        convert_tools: Whether to read the tools column.

    Returns:
        Output after conversion (see ``ConversionOutput``).
    """
    outputs: ConversionOutput = {
        "prompt": [],
        "response": [],
        "system": [],
        "tools": [],
        "images": [],
        "videos": [],
        "audios": [],
    }

    # (#9) Currently, image input is not supported
    # Batch size is taken from the configured prompt column (previously the
    # literal "instruction" key was hard-coded here -- TODO #10).
    history_column = getattr(datasets_attr, "history", None)
    for i in range(len(examples[datasets_attr.prompt])):
        prompt = []
        content = []

        # Prepend multi-turn history ([question, answer] pairs) when present.
        # Uses the configured history column consistently (previously the
        # literal key "history" was checked but datasets_attr.history was read).
        if history_column and history_column in examples:
            prompt = _build_history_prompt(examples[history_column][i])

        if examples[datasets_attr.prompt][i]:
            content.append(examples[datasets_attr.prompt][i])

        if examples.get(datasets_attr.query, None) and examples[datasets_attr.query][i]:
            content.append(examples[datasets_attr.query][i])

        # Instruction and query are merged into a single user turn.
        prompt.append({"role": "user", "content": "\n".join(content)})

        if isinstance(examples[datasets_attr.response][i], str):
            response = [{"role": "assistant", "content": examples[datasets_attr.response][i]}]
        else:  # unsupervised
            response = []

        outputs["prompt"].append(prompt)
        outputs["response"].append(response)
        outputs["system"].append(examples[datasets_attr.system][i] if convert_system else "")
        outputs["tools"].append(examples[datasets_attr.tools][i] if convert_tools else "")
        outputs["images"].append(
            find_medias(examples[datasets_attr.images][i], datasets_attr) if datasets_attr.images else None,
        )
        outputs["videos"].append(
            find_medias(examples[datasets_attr.videos][i], datasets_attr) if datasets_attr.videos else None,
        )
        outputs["audios"].append(
            find_medias(examples[datasets_attr.audios][i], datasets_attr) if datasets_attr.audios else None,
        )

    return outputs


def find_medias(medias: Union[Any, Sequence[Any]], datasets_attr) -> Optional[List[Any]]:
    r"""
    Normalize a media field to a list, prefixing entries with the media dir.

    String entries are joined with ``datasets_attr.load_from`` when the
    resulting path is an existing file; otherwise the original value is kept
    (with a one-time warning).

    Args:
        medias: A single media item, a list of items, or None.
        datasets_attr: Dataset attributes; only ``load_from`` is read here.

    Returns:
        A new list of media items, or None when the field is empty/missing.
    """
    # Normalize to a fresh list; empty input yields None. (Previously a None
    # input was wrapped into [] and then crashed on the medias[0] access.)
    if medias is None:
        return None
    if not isinstance(medias, list):
        medias = [medias]
    if not medias:
        return None
    medias = list(medias)

    # Only string entries are treated as paths relative to the media dir.
    if isinstance(medias[0], str):
        for index, media in enumerate(medias):
            resolved = os.path.join(datasets_attr.load_from, media)
            if os.path.isfile(resolved):
                medias[index] = resolved
            else:
                logger.warning_once(f"Media {media} does not exist in `media_dir`. Use original path.")

    return medias


def convert_sharegpt(examples, datasets_attr: InstructionDatasetAttr, convert_system=False, convert_tools=False):
    r"""
    Converts sharegpt format dataset to the standard format.

    Args:
        examples: A batch of examples, mapping column names to per-example lists.
        datasets_attr: The attributes (column and role-tag names) of the dataset.
        convert_system: Whether to read the system column.
        convert_tools: Whether to read the tools column.

    Returns:
        Output after conversion (see ``ConversionOutput``). For a malformed
        conversation (roles out of order, or an odd number of turns) the raw
        converted messages are kept in "prompt" and "response" stays empty.
    """
    outputs: ConversionOutput = {
        "prompt": [],
        "response": [],
        "system": [],
        "tools": [],
        "images": [],
        "videos": [],
        "audios": [],
    }
    value_map = {
        datasets_attr.user_tag: "user",
        datasets_attr.assistant_tag: "assistant",
        datasets_attr.observation_tag: "observation",
        datasets_attr.function_tag: "function",
        datasets_attr.system_tag: "system",
    }
    # Valid conversations alternate user/observation (odd turns, 0-indexed even)
    # with assistant/function (even turns, 0-indexed odd).
    odd_tags = (datasets_attr.user_tag, datasets_attr.observation_tag)
    eve_tags = (datasets_attr.assistant_tag, datasets_attr.function_tag)
    accept_tags = (odd_tags, eve_tags)

    conversations = examples[datasets_attr.messages]
    for i, conversation_list in enumerate(conversations):
        # Reset per conversation: previously a single malformed conversation
        # marked every later conversation in the batch as invalid too.
        invalid_data = False
        messages = []
        prompt = []
        response = []
        for index, conversation in enumerate(conversation_list):
            if conversation[datasets_attr.role_tag] not in accept_tags[index % 2]:
                invalid_data = True

            prompt_converted = {
                "role": value_map.get(conversation[datasets_attr.role_tag]),
                "content": conversation[datasets_attr.content_tag],
            }
            messages.append(prompt_converted)
            prompt.append(prompt_converted)

        # check length: a valid conversation has an even number of turns
        if len(messages) % 2 != 0:
            invalid_data = True

        # Valid data: everything but the last turn is prompt, the last is response.
        if not invalid_data:
            prompt = messages[:-1]
            response = messages[-1:]

        outputs["prompt"].append(prompt)
        outputs["response"].append(response)
        outputs["system"].append(examples[datasets_attr.system_tag][i] if convert_system else "")
        outputs["tools"].append(examples[datasets_attr.tools][i] if convert_tools else "")
        outputs["images"].append(
            find_medias(examples[datasets_attr.images][i], datasets_attr) if datasets_attr.images else None,
        )
        outputs["videos"].append(
            find_medias(examples[datasets_attr.videos][i], datasets_attr) if datasets_attr.videos else None,
        )
        outputs["audios"].append(
            find_medias(examples[datasets_attr.audios][i], datasets_attr) if datasets_attr.audios else None,
        )

    return outputs


def convert_text(examples, text_column: str = "text"):
    """
    Convert a plain-text dataset to the standard format.

    Args:
        examples: A batch of examples, mapping column names to per-example lists.
        text_column: Name of the column holding the text samples, default "text".

    Returns:
        A dict with a single "texts" column containing only the non-empty samples.
    """
    return {"texts": [sample for sample in examples[text_column] if sample]}


def align_dataset(
    dataset_attr: InstructionDatasetAttr,
    dataset,
):
    """
    Align a raw dataset to the standard column format.

    Picks the converter matching ``dataset_attr.formatting`` and maps it over
    the dataset in batches, replacing the original columns with the standard
    ones (prompt/response/system/tools/media, or texts).

    Args:
        dataset_attr: The attributes of the dataset, including its format.
        dataset: The dataset to be aligned.

    Returns:
        The dataset after alignment.

    Raises:
        ValueError: If ``dataset_attr.formatting`` is not one of
            alpaca, sharegpt, pairwise, text.
    """
    args = get_args()

    # system/tools columns are only converted when present in the raw data.
    has_system = "system" in dataset.column_names
    has_tools = "tools" in dataset.column_names

    # (#8) Support datasets in other formats such as sharegpt.
    if dataset_attr.formatting == "alpaca":
        convert_func = partial(
            convert_alpaca,
            datasets_attr=dataset_attr,
            convert_system=has_system,
            convert_tools=has_tools,
        )
    elif dataset_attr.formatting == "sharegpt":
        convert_func = partial(
            convert_sharegpt,
            datasets_attr=dataset_attr,
            convert_system=has_system,
            convert_tools=has_tools,
        )
    elif dataset_attr.formatting == "pairwise":
        convert_func = partial(
            convert_pairwise,
            datasets_attr=dataset_attr,
        )
    elif dataset_attr.formatting == "text":
        convert_func = partial(convert_text, text_column=dataset_attr.text_column)
    else:
        raise ValueError("Currently, Dataset formats only support alpaca, sharegpt, pairwise, text.")

    # The following code is consistent with the format of datasets in llama factory.
    column_names = list(next(iter(dataset)).keys())

    kwargs = dict(
        num_proc=args.preprocessing_num_workers,
        # Only the main local process rebuilds the cache; others reuse it.
        load_from_cache_file=args.local_process_index != 0,
        desc=f"Convert {dataset_attr.formatting} format dataset {dataset_attr.name} to standard format.",
    )
    return dataset.map(
        convert_func,
        batched=True,
        batch_size=args.preprocessing_batch_size,
        remove_columns=column_names,
        **kwargs,
    )


def merge_datasets(aligned_datasets: List):
    """
    Merge a list of aligned datasets into a single dataset.

    Args:
        aligned_datasets: A non-empty list of aligned datasets (dataset
            objects, not strings as the previous ``List[str]`` hint implied).

    Returns:
        The single dataset when exactly one is given, otherwise the
        concatenation of all of them.

    Raises:
        ValueError: If ``aligned_datasets`` is None or empty.
    """
    # Need to deal with the dataset in different cases (stf, rm...)
    if not aligned_datasets:
        raise ValueError("`aligned_datasets` must contain at least one dataset.")
    if len(aligned_datasets) == 1:
        return aligned_datasets[0]
    return concatenate_datasets(aligned_datasets)
