# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""QwenVL DataLoader."""

import os
import json
import numpy as np
from PIL import Image
from typing import Callable, Optional, Union, List, Tuple
from pyarrow.lib import Table
from mindspore.dataset import GeneratorDataset
from mindformers.tools.register import MindFormerRegister, MindFormerModuleType
from mindformers.dataset.transforms.vision_transforms import BatchResize
from mindformers.dataset.dataloader.sft_dataloader import SFTDataSet

# Role tags used when rendering conversation turns into plain text.
USER_TOKEN = "USER"
ROBOT_TOKEN = "ASSISTANT"
# Maps annotation-file role names ("human"/"gpt") to the rendered role tags.
ROLE_MAP = {"human": USER_TOKEN, "gpt": ROBOT_TOKEN}
# Turn-separator token; not referenced in this chunk — presumably consumed downstream. TODO confirm.
SPLIT_TOKEN = "<STOP>"
# Default system prompt prepended as the first turn of every conversation.
SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful," \
                 " detailed, and polite answers to the user's questions."


@MindFormerRegister.register(MindFormerModuleType.DATASET_LOADER)
class LlavaDataLoader:
    """Llava dataloader: wraps a LlavaSFTDataset in a MindSpore GeneratorDataset."""

    _default_column_names = ["image", "text"]

    def __new__(
            cls,
            dataset_dir: str,
            text_file: str,
            file_format: Union[List[str], str] = None,
            column_names: Union[List[str], Tuple[str]] = None,
            shuffle: Optional[bool] = False,
            image_size: int = 336,
            **kwargs
    ):
        """Create a GeneratorDataset over the Llava SFT data.

        Args:
            dataset_dir: Root directory containing images and the annotation file.
            text_file: Annotation file path, relative to ``dataset_dir``.
            file_format: Format of the annotation file (e.g. "json").
            column_names: Output column names; defaults to ["image", "text"].
            shuffle: Whether to shuffle the dataset.
            image_size: Target side length for image resizing.
            **kwargs: Forwarded to ``GeneratorDataset``.

        Returns:
            mindspore.dataset.GeneratorDataset yielding (image, text) pairs.

        Raises:
            ValueError: If ``dataset_dir`` or the annotation file does not
                exist, or ``text_file`` is empty.
        """
        if column_names is None:
            column_names = cls._default_column_names

        if not os.path.exists(dataset_dir):
            raise ValueError(f"The dataset directory {dataset_dir} is not exist.")

        if not text_file:
            # Not an f-string: the message has no placeholder.
            raise ValueError("The text file is null.")

        # Bug fix: the original tested `os.path.join(...)`, which is always
        # truthy, so a missing annotation file was never detected. Check the
        # joined path's existence instead.
        text_file_path = os.path.join(dataset_dir, text_file)
        if not os.path.exists(text_file_path):
            raise ValueError(f"The text file {text_file_path} is not exist.")

        datasets = LlavaSFTDataset(dataset_dir, text_file, file_format, image_size)
        return GeneratorDataset(datasets, column_names, shuffle=shuffle, **kwargs)


class LlavaSFTDataset(SFTDataSet):
    """SFT dataset for Llava: pairs each conversation with its image.

    Reads a JSON annotation file under ``dataset_dir``, keeps only examples
    that reference an image, and yields (resized image, text fields) pairs.
    """

    def __init__(
            self,
            dataset_dir: str,
            text_file: str,
            file_format: str,
            image_size: int = 336,
            read_function: Callable = None,
            map_function_kwargs: dict = None
    ):
        """Initialize the dataset.

        Args:
            dataset_dir: Root directory containing images and the annotation file.
            text_file: Annotation file path, relative to ``dataset_dir``.
            file_format: Annotation file format (e.g. "json").
            image_size: Target side length used by the resize transform.
            read_function: Optional custom reader forwarded to ``SFTDataSet``.
            map_function_kwargs: Extra keyword arguments forwarded to
                ``_llava_map`` on every example.
        """
        self.resize = BatchResize(image_size, interpolation="cubic")
        text_file = os.path.join(dataset_dir, text_file)
        self.dataset_dir = dataset_dir
        super(LlavaSFTDataset, self).__init__(
            text_file, None, None, None, file_format, None, read_function, self._llava_map, map_function_kwargs
        )

    def __getitem__(self, i):
        """Return the (resized RGB image array, mapped text dict) pair for row ``i``."""
        example = self.table.take([i]).to_pylist()[0]
        result = self.map_function(example, **self.map_function_kwargs)
        img_dir = result.pop("img_dir")
        # Convert to RGB so grayscale/paletted images still yield three channels.
        img = self.resize(np.array(Image.open(img_dir).convert("RGB")))
        return img, result

    def __len__(self):
        """Return the number of (filtered) examples in the annotation table."""
        return len(self.table)

    def _llava_map(self, example, **kwargs):
        """Convert one raw conversation example into training text fields.

        Args:
            example: One annotation row; must contain an "image" path and a
                conversation list under ``data_field``.
            **kwargs: Optional overrides: ``data_field``, ``from_keyword``,
                ``value_keyword``, ``user_role_name``, ``assistant_role_name``,
                ``user_prompt``, ``assistant_prompt``, ``system_message``.

        Returns:
            dict with keys img_dir, raw_data, raw_data_role, user_role_name
            and assistant_role_name; an empty dict when the example carries
            no image.

        Raises:
            ValueError: If a conversation turn carries a role name outside
                ROLE_MAP or not matching the configured role names.
        """
        img_dir = example.get("image")
        if img_dir is None:
            # Text-only examples are skipped; the caller receives an empty dict.
            return dict()
        img_dir = os.path.join(self.dataset_dir, img_dir)
        data_field = kwargs.get("data_field", "conversations")
        from_keyword, value_keyword = kwargs.get("from_keyword", "from"), kwargs.get("value_keyword", "value")
        user_role_name = kwargs.get("user_role_name", "human")
        assistant_role_name = kwargs.get("assistant_role_name", "gpt")
        user_prompt, assistant_prompt = kwargs.get("user_prompt", ""), kwargs.get("assistant_prompt", "")
        system_message = kwargs.get("system_message", SYSTEM_MESSAGE)

        raw_data = []
        raw_data_role = []

        # Prepend the system message as the first turn.
        raw_data.append(system_message)
        raw_data_role.append('system')

        for message in example[data_field]:
            from_ = message[from_keyword]
            value = message[value_keyword]
            raw_data_role.append(from_)
            if from_ not in ROLE_MAP:
                raise ValueError(f"Incorrect role name: {from_}. Check the values of `user_role_name` "
                                 f"and `assistant_role_name` in `map_function_kwargs`.")
            if from_ == user_role_name:
                raw_data.append(ROLE_MAP[from_] + ': ' + user_prompt + value)
            elif from_ == assistant_role_name:
                raw_data.append(ROLE_MAP[from_] + ': ' + assistant_prompt + value)
            else:
                # Role is in ROLE_MAP but matches neither configured name
                # (e.g. custom role names with default-style data).
                raise ValueError(f"Incorrect role name: {from_}. Check the values of `user_role_name` "
                                 f"and `assistant_role_name` in `map_function_kwargs`.")
        return dict(img_dir=img_dir, raw_data=raw_data, raw_data_role=raw_data_role, user_role_name=user_role_name,
                    assistant_role_name=assistant_role_name)

    @staticmethod
    def filter_data_wo_image(data, image_prefix="coco/train2017"):
        """Drop examples that lack an "image" key or whose image path does not
        start with ``image_prefix``.

        Args:
            data: List of example dicts loaded from the annotation file.
            image_prefix: Required image-path prefix; the previous hard-coded
                "coco/train2017" is kept as the default for compatibility.

        Returns:
            A list containing only the examples with a matching image path.
        """
        return [item for item in data
                if "image" in item and item["image"].startswith(image_prefix)]

    def _read_json(self, path):
        """Read a JSON annotation file into a pyarrow Table, keeping only
        examples that reference an image."""
        with open(path, 'r', encoding='UTF-8') as f:
            data = json.load(f)
        data = self.filter_data_wo_image(data)
        if isinstance(data, dict):
            return Table.from_pydict(data)
        if isinstance(data, list):
            # For pyarrow 12.0.1, pyarrow.lib.Table does not support from_pylist.
            # NOTE(review): an empty filtered list would raise IndexError here,
            # same as the original code — confirm upstream data is non-empty.
            pydict = {k: [i[k] for i in data] for k in data[0]}
            return Table.from_pydict(pydict)
        raise NotImplementedError
