from mindformers.models.blip2 import Blip2Qformer, Blip2Config
from mindformers import BertTokenizer
import numpy as np
import mindspore as ms

# Runtime setup: PyNative (eager) mode on device 7.
# Fix: use the named constant ms.PYNATIVE_MODE instead of the magic number 1.
ms.set_context(mode=ms.PYNATIVE_MODE, device_id=7)

# Seed NumPy so the synthetic image batch is reproducible across runs.
np.random.seed(1)

# Synthetic video-style batch: (batch=4, frames=4, channels=3, H=224, W=224).
image_inputs = np.random.rand(4, 4, 3, 224, 224).astype(np.float32)

image_inputs = ms.Tensor(image_inputs)

# Rearrange to (batch, channels, frames, H, W).
# NOTE(review): target layout inferred from the transpose axes only — confirm
# against the BLIP-2 vision encoder's expected input layout.
image_inputs = image_inputs.transpose(0, 2, 1, 3, 4)

# Tokenizer built from a hard-coded absolute vocab path — consider making this
# configurable (env var / CLI arg) so the script runs outside this machine.
tokenizer = BertTokenizer(vocab_file="/home/zhangyouwen/suite/mobile_commucation/bert_uncased/vocab.txt")

string_list = ["I love Beijing.", "I love Henan.", "I love Tianjin.", "I love Shanghai."]

# Pad every caption to exactly 10 tokens and keep only the token-id tensor.
text_inputs = tokenizer(string_list, padding="max_length", max_length=10, return_tensors="ms")["input_ids"]

# ckpt_path = "/home/zhangyouwen/suite/mobile_commucation/umt_l16_qformer.ckpt"
#
# state_dict = ms.load_checkpoint(ckpt_path)

# Start from the pretrained "blip2_stage1_vit_g" preset, then shrink both
# sub-modules to a tiny debug size (original release values noted inline).
config = Blip2Config.from_pretrained("blip2_stage1_vit_g")

qformer_cfg = config.qformer_config
vision_cfg = config.vision_config

# Disable automatic checkpoint loading for both sub-modules.
vision_cfg.checkpoint_name_or_path = ""
qformer_cfg.checkpoint_name_or_path = ""

# Q-Former: cut widths/depth far below the release configuration.
qformer_cfg.encoder_width = 32    # 1024
qformer_cfg.head_embed_dim = 24   # 768
qformer_cfg.hidden_size = 24
qformer_cfg.num_hidden_layers = 1
qformer_cfg.intermediate_size = 32
qformer_cfg.resize_token_embeddings = False
qformer_cfg.vtm_cat_text_cls = True

# Vision tower: embedding dim must match the Q-Former's encoder width.
vision_cfg.embed_dim = qformer_cfg.encoder_width
vision_cfg.depth = 1
vision_cfg.intermediate_size = 32

print(qformer_cfg)
print(vision_cfg)

# Build the shrunken Q-Former, switch to evaluation mode, and run a single
# forward pass over the synthetic image/text batch.
qformer_model = Blip2Qformer(config)
qformer_model.set_train(False)

# Checkpoint loading intentionally disabled for this smoke test:
# ms.load_param_into_net(qformer_model, state_dict)
# print("load param into net finished!")

forward_output = qformer_model(image_inputs, text_inputs)
print(forward_output)
