# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindformers.tools.register import MindFormerRegister, MindFormerModuleType
import copy

# Label id ignored by the loss; set according to the input data convention.
ignore_token_id = -100


@MindFormerRegister.register(MindFormerModuleType.TRANSFORMS)
class VideoChat2Transform:
    """
    VideoChat2 caption transform: preprocesses a multi-turn conversation and
    tokenizes it, aligned with the original torch implementation.

    Layout of the produced sequence:

    * text before ``</Video>`` is tokenized and prefixed with BOS;
    * ``qformer_length`` copies of the image-pad token act as the placeholder
      span for the visual (Q-Former) features;
    * text from ``</Video>`` onwards is tokenized, with labels masked so that
      only the assistant answers contribute to the loss;
    * the instruction is tokenized separately with the Q-Former tokenizer.
    """

    # Label id ignored by the loss (same value as the module-level
    # ``ignore_token_id``); kept on the class so the transform is
    # self-contained.
    IGNORE_TOKEN_ID = -100

    def __init__(self,
                 tokenizer,
                 qformer_tokenizer,
                 image_pad_tag="<image>",
                 qformer_length=32,
                 max_length=4097,
                 random_seed=2022,
                 max_txt_len=512
                 ):
        """
        Args:
            tokenizer: main LM tokenizer (must provide ``add_tokens``,
                ``bos_token_id`` and ``pad_token_id``).
            qformer_tokenizer: tokenizer for the Q-Former instruction branch.
            image_pad_tag (str): special token used as the visual placeholder.
            qformer_length (int): number of visual placeholder positions.
            max_length (int): total padded length of the output sequence.
            random_seed (int): kept for config compatibility; unused here.
            max_txt_len (int): padded length of the Q-Former instruction.
        """
        self.tokenizer = tokenizer
        self.qformer_tokenizer = qformer_tokenizer
        self.qformer_length = qformer_length
        self.max_length = max_length
        self.random_seed = random_seed
        self.image_pad_tag = image_pad_tag
        self.img_begin_token = "<Image>"
        self.img_end_token = "</Image>"
        self.start_token = "<Video>"
        self.end_token = "</Video>"
        self.begin_signal = "###"
        self.role = ("Human", "Assistant")
        self.tokenizer_add_tokens()
        self.max_txt_len = max_txt_len

    def __call__(self, caption, task="sft"):
        """Run the transform; ``task`` is kept for interface compatibility only."""
        return self.pre_process(caption)

    def tokenizer_add_tokens(self):
        """Register the image pad tag as a special token and cache its id."""
        special_tokens = [self.image_pad_tag]
        self.tokenizer.add_tokens(special_tokens, special_tokens=True)
        self.image_pad_id = self.tokenizer(self.image_pad_tag, add_special_tokens=False)["input_ids"][0]

    def _get_text_len(self, text):
        """Return the token count of ``text`` (no special tokens added).

        NOTE(review): relies on ``input_ids`` coming back 1-D with shape
        (seq_len,); a batched (1, seq_len) return would always yield 1 —
        confirm the tokenizer's return shape.
        """
        return self.tokenizer(text, return_tensors="np", add_special_tokens=False).input_ids.shape[0]

    def pre_process(self, inputs):
        """
        Build training inputs from one sample.

        Args:
            inputs (dict): must contain ``"conversation"`` (the full dialog
                text with one ``<Video>...</Video>`` span) and
                ``"instruction"`` (plain text for the Q-Former branch).

        Returns:
            tuple: (raw_text, coord, raw_label, instruction_input_ids,
            instruction_attention_mask).

        Raises:
            ValueError: if the per-turn length bookkeeping does not add up
                to the tokenized conversation length.
        """
        conversation = inputs.get("conversation")
        instruction = inputs.get("instruction")

        # Split once around the video placeholder; the closing tag stays with
        # the second half so token counts line up with the label bookkeeping.
        p_before, p_after = conversation.split(self.end_token)
        p_after = self.end_token + p_after
        p_before_tokens = self.tokenizer(p_before, return_tensors="np", add_special_tokens=False).input_ids
        p_after_tokens = self.tokenizer(p_after, return_tensors="np", add_special_tokens=False).input_ids

        sep1 = self.begin_signal + self.role[0] + ": "
        sep2 = self.begin_signal + self.role[1] + ": "
        # Split on the assistant separator and re-attach it, so every chunk
        # after the first starts with "###Assistant: ".
        raw_text = p_after.split(sep2)
        for idx in range(1, len(raw_text)):
            raw_text[idx] = sep2 + raw_text[idx]

        # The first chunk contains system prompt and question, the last one
        # only the final answer; rstrip() compensates for the extra " " the
        # tokenizer would otherwise count.
        answer_targets = copy.deepcopy(p_after_tokens)
        # target: "###Human:       ###Assistant: xxxxx. ###"
        system = raw_text[0].split(sep1)[0]
        system_len = self._get_text_len(system.rstrip())
        sep_len = self._get_text_len(sep1.rstrip())
        cur_len = self._get_text_len(raw_text[0].rstrip())
        # Mask everything that is not an assistant answer.
        answer_targets[0:system_len] = self.IGNORE_TOKEN_ID
        answer_targets[(system_len + sep_len):cur_len] = self.IGNORE_TOKEN_ID
        for text in raw_text[1:-1]:
            total_len = self._get_text_len(text.rstrip())
            ans_len = self._get_text_len((text.split(sep1)[0] + sep1).rstrip())
            answer_targets[(cur_len + ans_len):(cur_len + total_len)] = self.IGNORE_TOKEN_ID
            cur_len += total_len
        cur_len += self._get_text_len(raw_text[-1].rstrip())
        if cur_len != answer_targets.shape[0]:
            # Explicit raise instead of assert: asserts are stripped under
            # `python -O` and this is a data-integrity check.
            raise ValueError(f"The final length ({cur_len}) is not equal to the original "
                             f"prompt ({answer_targets.shape[0]}): {conversation}")

        # p_before: prepend BOS; the whole prefix is masked in the labels.
        p_before_label = np.ones((p_before_tokens.shape[0] + 1,), dtype=np.int32) * self.IGNORE_TOKEN_ID
        p_before_text = np.insert(p_before_tokens, 0, self.tokenizer.bos_token_id)

        # Q-Former placeholder span: image-pad tokens, masked in the labels.
        qformer_label = np.ones((self.qformer_length,), dtype=np.int32) * self.IGNORE_TOKEN_ID
        qformer_text = np.ones((self.qformer_length,), dtype=np.int32) * self.image_pad_id

        # p_after: carries the answer labels computed above.
        p_after_label = answer_targets
        p_after_text = p_after_tokens

        # Truncate the tail if the assembled sequence would exceed max_length.
        budget = self.max_length - self.qformer_length - p_before_text.shape[0]
        if p_after_text.shape[0] > budget:
            p_after_label = p_after_label[:budget]
            p_after_text = p_after_text[:budget]

        raw_text_tmp = np.concatenate((p_before_text, qformer_text, p_after_text))
        raw_label_tmp = np.concatenate((p_before_label, qformer_label, p_after_label))

        # NOTE(review): labels are padded with pad_token_id rather than
        # IGNORE_TOKEN_ID — presumably the downstream loss masks pad ids too;
        # confirm against the training pipeline.
        padding_ids = np.array([self.tokenizer.pad_token_id] * (self.max_length - len(raw_text_tmp)), dtype=np.int32)

        raw_text = np.concatenate((raw_text_tmp, padding_ids))
        raw_label = np.concatenate((raw_label_tmp, padding_ids))

        # Q-Former instruction branch, padded to max_txt_len.
        res_tmp = self.qformer_tokenizer(instruction, return_tensors="np", padding="max_length",
                                         max_length=self.max_txt_len)
        instruction_input_ids, instruction_attention_mask = res_tmp.input_ids, res_tmp.attention_mask

        # Position of the first image-pad token marks the start of the
        # visual span.
        img_start_poses = [np.where(raw_text == self.image_pad_id)[0][0]]
        coord = self._generate_coord(img_start_poses)

        return raw_text, coord, raw_label, instruction_input_ids, instruction_attention_mask

    def _generate_coord(self, img_start_pos):
        """Return (num_img, qformer_length, 2) coordinates; each entry is
        [0, absolute_position] for one placeholder slot of one image."""
        num_img = len(img_start_pos)
        coord = np.zeros((num_img, self.qformer_length, 2), np.int32)
        for idx, pos in enumerate(img_start_pos):
            for img_pos in range(self.qformer_length):
                coord[idx, img_pos] = [0, pos + img_pos]
        return coord


if __name__ == "__main__":
    # Manual smoke test with hard-coded local paths; adjust before running.
    from mindformers import LlamaTokenizer, BertTokenizer

    sample = {
        "text": [{'i': "Analyze the video to figure out what's going on. Choose the correct answer that either explains why, forecasts what comes next, or imagines a different situation.", 'q': "Question: Which of the following is responsible for the collision between the gray object and the cube?\nOptions:\n(A) the collision between the gray sphere and the purple sphere\n(B) the presence of the metal sphere\n(C) the blue rubber sphere's entering the scene\n(D) the presence of the blue rubber sphere", 'a': 'Answer: (D) the presence of the blue rubber sphere'}],
        "conversation": """Analyze the video to figure out what's going on. Choose the correct answer that either explains why, forecasts what comes next, or imagines a different situation. ###Human: <Video></Video> ###Human: Question: Which of the following is responsible for the collision between the gray object and the cube?
Options:
(A) the collision between the gray sphere and the purple sphere
(B) the presence of the metal sphere
(C) the blue rubber sphere's entering the scene
(D) the presence of the blue rubber sphere ###Assistant: Answer: (D) the presence of the blue rubber sphere ###""",
        "instruction": """Analyze the video to figure out what's going on. Choose the correct answer that either explains why, forecasts what comes next, or imagines a different situation. Question: Which of the following is responsible for the collision between the gray object and the cube?
Options:
(A) the collision between the gray sphere and the purple sphere
(B) the presence of the metal sphere
(C) the blue rubber sphere's entering the scene
(D) the presence of the blue rubber sphere"""
    }

    llm_tokenizer = LlamaTokenizer("/home/zhangyouwen/work/data/mobile_video_data/tokenizer.model")
    bert_tokenizer = BertTokenizer("/home/zhangyouwen/suite/mobile_commucation/vocab.txt")
    video_transform = VideoChat2Transform(llm_tokenizer, bert_tokenizer)
    video_transform(sample)
