from dataset import MyDataset,DataCollator
import json
import torch
from modeling import fuseModelWithAppendToBegin
from transformers import AutoModel,AutoTokenizer,AutoModelForCausalLM,BertModel

# sent2vec_tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5',model_max_length=512)
# sent2vec = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5',torch_dtype=torch.float16)
# llm_tokenizer = AutoTokenizer.from_pretrained('/data/lxy/baichuan2-chat-7b',trust_remote_code=True)
# llm = AutoModelForCausalLM.from_pretrained('/data/lxy/baichuan2-chat-7b',trust_remote_code=True,torch_dtype=torch.bfloat16)
# model=fuseModelWithAppendToBegin.from_pretrained('/hy-tmp/multi-r2/Fuser/add2begin/baichuan7bchat')

# print(model.sent2vec.state_dict())
# model.save_pretrained('/hy-tmp/damnModel',max_shard='5GB')
def compare_named_parameters(params1, params2):
    """Compare two lists of ``(name, tensor)`` pairs elementwise.

    For each pair, prints both parameter names and whether the tensors are
    numerically close (per ``torch.allclose``); equal pairs also have their
    values printed, mirroring the original debug output.

    Args:
        params1: list of (name, tensor) pairs, e.g. from ``named_parameters()``.
        params2: second list to compare against, in the same order.

    Returns:
        int: the number of pairs that are NOT equal (0 means all matched).
    """
    # zip() would silently truncate to the shorter list, hiding exactly the
    # kind of checkpoint mismatch this script is meant to detect — warn first.
    if len(params1) != len(params2):
        print(f"Parameter count differs: {len(params1)} vs {len(params2)}; "
              "comparing the common prefix only.")
    mismatches = 0
    for (name1, p1), (name2, p2) in zip(params1, params2):
        if torch.allclose(p1.data, p2.data):
            print(name1, name2)
            print(p1.data, p2.data)
            print("Parameters are equal.")
        else:
            print(name1, name2)
            print("Parameters are not equal.")
            mismatches += 1
    return mismatches


def main():
    """Load the saved fused model and a reference encoder, then verify that
    the fused model's ``sent2vec`` weights match the reference checkpoint."""
    model = fuseModelWithAppendToBegin.from_pretrained('/hy-tmp/damnModel')
    sent2vec = BertModel.from_pretrained('/hy-tmp/thenlper/gte-large-zh')

    # Compare parameters pairwise (same order as named_parameters() yields).
    compare_named_parameters(
        list(sent2vec.named_parameters()),
        list(model.sent2vec.named_parameters()),
    )


# Guarded so importing this module doesn't trigger the heavy model loads.
if __name__ == "__main__":
    main()
# import torch
# model=torch.load('/hy-tmp/damnModel/pytorch_model-00004-of-00004.bin')
# print(model)
# from transformers import LlamaForCausalLM,LlamaModel,
# class C(LlamaForCausalLM):
#     def __init__(self,  config=None, *model_args, **model_kwargs):
#             super().__init__(config)
#             self.llama = LlamaModel(config)
#             self.bert= BertModel.from_pretrained('dir_to_bert_ckpt',low_cpu_mem_usage=True)
#             self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
#             self.post_init()

# model=C.from_pretrained('llama_ckpt_dir',low_cpu_mem_usage=True)
