# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import mindspore as ms
from mindspore import Tensor
from mindspore import dtype as mstype
from llava import LlavaBase
from mindformers import MindFormerRegister, MindFormerModuleType
from llava_config import LlavaConfig
from mindformers.tools import logger
from mindspore.ops import operations as P
from mindspore import ops


@MindFormerRegister.register(MindFormerModuleType.MODELS)
class LlavaVlm(LlavaBase):
    """LLaVA vision-language model.

    Combines a vision encoder, an adapter (projector) and an LLM: image
    features are projected into the LLM embedding space and scattered into
    the token-embedding sequence at the image-token positions (`img_pos`).
    """

    def __init__(self, config: LlavaConfig, **kwargs):
        # Resolve the config fallback BEFORE any use: the original passed a
        # possibly-None config to super().__init__ and read config.use_past
        # from the raw argument, which would crash when config is None.
        config = config if config is not None else LlavaConfig()
        super(LlavaVlm, self).__init__(config, **kwargs)
        self.config = config

        self.vision_encoder = self.init_vision_encoder()
        self.adapter = self.init_adapter()
        self.llm_model = self.init_llm()
        self.freeze_component()
        self.is_first_iteration = True
        self.use_past = config.use_past

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.ones_like = P.OnesLike()
        self.cast = P.Cast()
        self.not_equal = P.NotEqual()
        self.slice = P.StridedSlice()
        self.pad_token_id = config.pad_token_id
        self.eos_token_id = config.eos_token_id
        self.ignore_token_id = ms.Tensor(config.ignore_token_id, mstype.int32)
        self.batch_size = config.text_config.batch_size
        # Per-sample batch offsets: added to per-sample image positions so
        # they become (batch, position) scatter indices.
        self.batch_index_adder = ms.Tensor(
            [[i, 0] for i in range(self.batch_size)], ms.int32).reshape(self.batch_size, 1, 1, 2)
        self.img_pos_add = P.Add()
        self.tensor_scatter_update = ops.TensorScatterUpdate().shard(((1, 1, 1),
                                                                      (1, 1, 1),
                                                                      (1, 1, 1)))
        # Load weights from the user-configured path only. The original code
        # overwrote config.checkpoint_name_or_path with a hard-coded local
        # debug path (plus several commented-out extra loads), which silently
        # discarded any checkpoint the caller configured.
        self.load_checkpoint(config)

    def freeze_component(self):
        """Freeze vision-encoder and/or LLM parameters according to config flags."""
        if self.config.freeze_vision:
            logger.info("freeze vision encoder")
            for param in self.vision_encoder.trainable_params():
                # The resampler (attn_pool) may remain trainable even when
                # the rest of the vision tower is frozen.
                if not self.config.freeze_resampler and "vision_encoder.attn_pool" in param.name:
                    param.requires_grad = True
                else:
                    param.requires_grad = False

        if self.config.freeze_llm:
            logger.info("freeze llm model")
            for param in self.llm_model.trainable_params():
                param.requires_grad = False

    def concat_image_text(self, text_embeds, image_embeds, img_pos):
        """Scatter image embeddings into text embeddings at `img_pos`.

        Args:
            text_embeds: token embeddings, one row per (batch, position).
            image_embeds: projected image features to insert.
            img_pos: per-sample positions of the image placeholder tokens.

        Returns:
            text_embeds with image features written over the placeholder slots.
        """
        # Offset positions by their batch index, then flatten to the
        # (num_updates, ...) layout TensorScatterUpdate expects.
        img_pos = self.img_pos_add(img_pos, self.batch_index_adder).reshape((-1,) + img_pos.shape[-2:])
        image_embeds = self.cast(image_embeds, text_embeds.dtype)
        text_embeds = self.tensor_scatter_update(text_embeds, img_pos, image_embeds)
        return text_embeds

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        """Build the kwargs for one generation step.

        Images are only fed on the first step when incremental decoding
        (`use_past`) is enabled; later steps reuse the KV cache and pass
        ``images=None``.
        """
        input_position = kwargs.get("current_index", None)
        img_pos = kwargs.get("img_pos", None)

        if input_position is not None:
            input_position = ms.Tensor(input_position, mstype.int32)

        if self.is_first_iteration or not self.use_past:
            images = kwargs.pop("images")
        else:
            images = None

        if img_pos is not None:
            img_pos = ms.Tensor(img_pos, ms.int32)

        return {
            "input_ids": ms.Tensor(input_ids, mstype.int32),
            "images": images,
            "input_position": input_position,
            "img_pos": img_pos
        }

    def construct(self, input_ids, images, img_pos: Tensor = None, labels=None,
                  input_position=None, position_ids=None, attention_mask=None, init_reset=True,
                  batch_valid_length=None, batch_index=None, zactivate_len=None):
        """Forward pass: embed tokens, fuse image features, run the LLM."""
        bs, seq_len = self.shape(input_ids)
        if self.training:
            # Standard next-token shift: drop the last input token and the
            # first label token.
            tokens = self.slice(input_ids, (0, 0), (bs, seq_len - 1), (1, 1))
            labels = self.slice(labels, (0, 1), (bs, seq_len), (1, 1))
        else:
            tokens = input_ids

        input_embeds = self.llm_model.model.tok_embeddings(tokens)
        input_attn_mask = self.cast(self.not_equal(tokens, self.pad_token_id), mstype.float32)

        # Fuse vision features only when images are provided. On incremental
        # decode steps prepare_inputs_for_generation passes images=None; the
        # original guard covered only the reshape, so the vision encoder was
        # invoked with None and would crash.
        if images is not None:
            if images.ndim == 5:
                # Flatten (bs, n_images, c, h, w) -> (bs * n_images, c, h, w).
                images_shape = self.shape(images)
                new_shape = (images_shape[0] * images_shape[1], images_shape[2],
                             images_shape[3], images_shape[4])
                images = self.reshape(images, new_shape)
            image_embeds = self.vision_encoder(images)
            image_embeds = self.adapter(image_embeds)
            input_embeds = self.concat_image_text(input_embeds, image_embeds, img_pos)

        return self.llm_model(
            input_embeddings=input_embeds,
            attention_mask=input_attn_mask,
            labels=labels,
            input_position=input_position,
            init_reset=init_reset,
            batch_valid_length=batch_valid_length
        )
