from mindformers import Blip2Llm, Blip2Config
from mindformers import MindFormerConfig
from mindformers.models.build_config import  build_model_config
import numpy as np
import mindspore as ms
# Where the pre-dumped VideoChat2 debug arrays live.
NUMPY_DIR = "/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/numpy"
YAML_PATH = "/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/configs/videochat2/run_videochat2_stage3.yaml"


def _load_tensor(filename, dtype=ms.int32):
    """Load ``NUMPY_DIR/filename`` and wrap it in a batch-of-1 MindSpore tensor.

    Args:
        filename: file name inside ``NUMPY_DIR`` (e.g. ``"coord.npy"``).
        dtype: target MindSpore dtype (defaults to int32, matching the
            original per-array conversions).

    Returns:
        ms.Tensor of shape ``(1, *array.shape)``.
    """
    arr = np.load(f"{NUMPY_DIR}/{filename}")
    return ms.Tensor([arr], dtype)


def main():
    """Run one forward pass of Blip2Llm on pre-dumped VideoChat2 inputs and print the output."""
    # mode=0 is GRAPH_MODE; device_id pins the run to Ascend card 6.
    ms.set_context(device_target="Ascend", device_id=6, mode=0)

    # Build the model config from the YAML; keep the two configs under
    # distinct names instead of shadowing one with the other.
    run_config = MindFormerConfig(YAML_PATH)
    model_config = build_model_config(run_config.model.model_config)

    np.random.seed(1)

    # Synthetic video clip: (batch=1, channels=3, frames=8, H=224, W=224),
    # seeded for reproducibility.
    image = ms.Tensor(np.random.rand(1, 3, 8, 224, 224), ms.float32)

    raw_text = np.load(f"{NUMPY_DIR}/raw_text.npy")
    # Replace token id 32000 with 0 — presumably mapping an added
    # image-placeholder id down to the pad id; TODO confirm against tokenizer.
    raw_text = np.where(raw_text == 32000, 0, raw_text)
    text_input_ids = ms.Tensor([raw_text], ms.int32)

    img_pos = _load_tensor("coord.npy")
    labels = _load_tensor("raw_label.npy")
    instruction_input_ids = _load_tensor("instruction_input_ids.npy")
    instruction_attention_mask = _load_tensor("instruction_attention_mask.npy")

    model = Blip2Llm(model_config)
    model.set_train(True)

    # Invoke the Cell, not model.construct(): Cell.__call__ is what performs
    # graph compilation (required in GRAPH_MODE), runs hooks, and applies
    # mixed-precision wrapping — calling construct() directly bypasses it.
    output = model(image=image,
                   text_input_ids=text_input_ids,
                   labels=labels,
                   instruction_input_ids=instruction_input_ids,
                   instruction_attention_mask=instruction_attention_mask,
                   img_pos=img_pos)
    print(output)


if __name__ == "__main__":
    main()
