# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindformers.tools.register import MindFormerRegister, MindFormerModuleType
from llava_dataloader import USER_TOKEN, ROBOT_TOKEN, SPLIT_TOKEN

# Label id masked out of the loss. pre_process assigns it to user/system
# turns, to the image-pad expansions, and to label padding.
# (Original comment, translated from Chinese: "ignore token id, per the input".)
ignore_token_id = -100


@MindFormerRegister.register(MindFormerModuleType.TRANSFORMS)
class LlavaTransform:
    """
    Caption/conversation transform: tokenizes input text, expands each
    image placeholder to ``max_img_size`` image-pad token ids, and pads or
    truncates the result to ``max_length``. Aligned with the torch impl.
    """

    def __init__(self, tokenizer,
                 image_pad_tag="<image>",
                 prompt=None,
                 max_img_size=576,
                 padding="max_length",
                 max_length=512,
                 max_annotation=None,
                 random_seed=2022,
                 truncation=True,
                 add_special_tokens=True
                 ):
        """
        Args:
            tokenizer: tokenizer exposing ``__call__``/``encode``/``add_tokens``
                and the ``add_bos_token``/``add_eos_token``/``pad_token_id``
                attributes (HuggingFace-style interface).
            image_pad_tag (str): placeholder token marking an image position.
            prompt (str): caption prompt template; a default English caption
                template is used when None.
            max_img_size (int): number of image-pad tokens each image
                placeholder expands to.
            padding (str): padding strategy name (stored; not used locally).
            max_length (int): fixed output sequence length.
            max_annotation: optional annotation cap (stored; not used locally).
            random_seed (int): random seed (stored; not used locally).
            truncation (bool): truncation flag (stored; not used locally).
            add_special_tokens (bool): special-token flag (stored; not used locally).
        """
        self.tokenizer = tokenizer
        self.prompt = prompt
        self.max_img_size = max_img_size
        self.max_length = max_length
        self.padding = padding
        self.random_seed = random_seed
        self.truncation = truncation
        self.add_special_tokens = add_special_tokens
        self.max_annotation = max_annotation
        self.image_pad_tag = image_pad_tag
        # Bug fix: the original only assigned self.template when prompt was
        # None, leaving the attribute undefined for a user-supplied prompt.
        self.template = '<image>Describe the image in English' if prompt is None else prompt
        self.tokenizer_add_tokens(USER_TOKEN, ROBOT_TOKEN, SPLIT_TOKEN)
        # Remember the tokenizer's original bos/eos settings; pre_process
        # toggles them per conversation turn and restores from these.
        self.add_bos_token = self.tokenizer.add_bos_token
        self.add_eos_token = self.tokenizer.add_eos_token

    def tokenizer_add_tokens(self, *args):
        """Register the image-pad tag as a special token and cache the ids
        of the control tokens used during preprocessing.

        Args:
            *args: extra special tokens. NOTE: these are deliberately NOT
                registered, matching the original behavior (the extend call
                was disabled there); the parameter is kept for interface
                compatibility.
        """
        special_tokens = [self.image_pad_tag]
        self.tokenizer.add_tokens(special_tokens, special_tokens=True)
        self.stop_token_id = self.tokenizer(SPLIT_TOKEN, add_special_tokens=False)["input_ids"][0]
        self.user_token_id = self.tokenizer(USER_TOKEN, add_special_tokens=False)["input_ids"][0]
        self.robot_token_id = self.tokenizer(ROBOT_TOKEN, add_special_tokens=False)["input_ids"][0]
        self.image_pad_id = self.tokenizer(self.image_pad_tag, add_special_tokens=False)["input_ids"][0]

    def __call__(self, caption, task="sft"):
        """Preprocess one sample.

        Args:
            caption: raw caption text (non-sft) or a conversation dict (sft).
            task (str): "sft" for conversation data, anything else for
                plain caption text.

        Returns:
            tuple of int32 numpy arrays:
            ``(input_ids, img_start_pos)`` for non-sft tasks, or
            ``(input_ids, img_start_pos, label)`` for sft.
        """
        out = self.pre_process(caption, task)
        return tuple(np.stack(item, dtype=np.int32) for item in out)

    def pre_process(self, inputs, task="sft"):
        """Tokenize `inputs`, expand image placeholders, pad/truncate.

        Args:
            inputs: raw text (non-sft) or a dict with "raw_data" /
                "raw_data_role" lists and optional "user_role_name" /
                "assistant_role_name" overrides (sft).
            task (str): task selector; any value other than "sft" takes
                the plain-caption path.

        Returns:
            (input_ids, coord) for non-sft; (input_ids, coord, label) for sft.

        Raises:
            ValueError: on missing sft fields or an unknown role name.
        """
        if task != "sft":
            return self._pre_process_caption(inputs)
        return self._pre_process_sft(inputs)

    def _pre_process_caption(self, text):
        """Tokenize plain caption text; expand image placeholders and pad."""
        ids = np.array(self.tokenizer(text)["input_ids"], dtype=np.int32)
        img_positions = np.where(ids == self.image_pad_id)[0]
        ids = self._expand_image_tokens(ids, img_positions, self.image_pad_id)
        # Consistency/robustness fix: the original non-sft path padded but
        # never truncated, so over-long inputs escaped max_length.
        ids = self._pad_and_truncate(ids, self.tokenizer.pad_token_id)
        coord = self._generate_coord(img_positions)
        return ids, coord

    def _pre_process_sft(self, inputs):
        """Tokenize a multi-turn conversation and build masked labels."""
        raw_data = inputs.get("raw_data")
        raw_data_role = inputs.get("raw_data_role")
        if raw_data is None or raw_data_role is None:
            raise ValueError("raw_data and raw_data_role are required")
        user_role = inputs.get("user_role_name")
        if user_role is None:
            user_role = "user"
        # Bug fix: the original elif had an operator-precedence error (the
        # conditional expression evaluated to the truthiness of a role name,
        # not a comparison) and read "user_role_name" where
        # "assistant_role_name" was intended.
        assistant_role = inputs.get("assistant_role_name")
        if assistant_role is None:
            assistant_role = "assistant"

        raw_input_ids, raw_label = [], []
        last = len(raw_data) - 1
        for i, item in enumerate(raw_data):
            # bos only before the first turn, eos only after the last,
            # each subject to the tokenizer's original setting.
            self.tokenizer.add_bos_token = self.add_bos_token if i == 0 else False
            self.tokenizer.add_eos_token = self.add_eos_token if i == last else False
            tokenized_item = self.tokenizer(item)["input_ids"]
            raw_input_ids.extend(tokenized_item)
            role = raw_data_role[i]
            if role == user_role or role == "system":
                # User/system turns contribute no training signal.
                raw_label.extend([ignore_token_id] * len(tokenized_item))
            elif role == assistant_role:
                # Mask only the leading "<robot>: " role prefix; the rest of
                # the assistant turn is supervised.
                role_ids = self.tokenizer(ROBOT_TOKEN + ": ")["input_ids"]
                raw_label.extend([ignore_token_id] * len(role_ids) + tokenized_item[len(role_ids):])
            else:
                raise ValueError(f"raw_data_role {role} is invalid")

        ids = np.array(raw_input_ids, dtype=np.int32)
        label = np.array(raw_label, dtype=np.int32)
        # Consistency fix: reuse the cached image-pad id instead of
        # re-encoding the tag (the original called tokenizer.encode here).
        img_positions = np.where(ids == self.image_pad_id)[0]
        ids = self._expand_image_tokens(ids, img_positions, self.image_pad_id)
        label = self._expand_image_tokens(label, img_positions, ignore_token_id)
        ids = self._pad_and_truncate(ids, self.tokenizer.pad_token_id)
        label = self._pad_and_truncate(label, ignore_token_id)
        coord = self._generate_coord(img_positions)
        return ids, coord, label

    def _expand_image_tokens(self, ids, positions, fill_id):
        """Replace the single token at each position in `positions` with
        ``max_img_size`` copies of `fill_id`.

        NOTE(review): `positions` are indices into the *pre-expansion*
        array; with more than one image the later positions do not account
        for tokens inserted by earlier expansions (original behavior kept
        — confirm multi-image samples are handled upstream).
        """
        replacement = np.array([fill_id] * self.max_img_size, dtype=np.int32)
        for pos in positions:
            # ids[pos + 1:] is an empty slice when the placeholder is last,
            # which makes the original's end-of-array special case redundant.
            ids = np.concatenate((ids[:pos], replacement, ids[pos + 1:]))
        return ids

    def _pad_and_truncate(self, ids, pad_id):
        """Right-pad `ids` with `pad_id` up to max_length, then clip to
        max_length (pad length clamps to zero for over-long input)."""
        pad = np.array([pad_id] * max(self.max_length - len(ids), 0), dtype=np.int32)
        return np.concatenate((ids, pad))[:self.max_length]

    def _generate_coord(self, img_start_pos):
        """Build a (num_img, max_img_size, 2) coordinate array where entry
        j of image k is ``[0, start_k + j]``.

        NOTE(review): start positions come from the pre-expansion sequence;
        see _expand_image_tokens for the multi-image caveat.
        """
        num_img = len(img_start_pos)
        coord = np.zeros((num_img, self.max_img_size, 2), np.int32)
        offsets = np.arange(self.max_img_size, dtype=np.int32)
        for idx, pos in enumerate(img_start_pos):
            coord[idx, :, 1] = pos + offsets
        return coord

    def _add_stop_label(self, input_list, add_nl_token_id=False):
        """Append the stop token id(s) — and optionally a newline token id —
        to `input_list` and return the extended list."""
        stop_ids = self.tokenizer.encode(SPLIT_TOKEN, add_special_tokens=False)
        if not add_nl_token_id:
            return input_list + stop_ids
        nl_ids = self.tokenizer("\n", add_special_tokens=False)["input_ids"]
        return input_list + stop_ids + nl_ids
