from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os
# os.environ['HF_ENDPOINT']="https://hf-mirror.com"  # 在命令行里面加入就可以了，别的代码完全不用动
# export HF_ENDPOINT=https://hf-mirror.com

# 加载 tokenizer 和模型，并指定 cache_dir 保存模型文件
# model_path = "/home/work_nfs15/asr_data/ckpt/Phi-3.5-mini-instruct/models--microsoft--Phi-3.5-mini-instruct/snapshots/af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0"
# model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,)
# tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True,)
# print(model)
import torch

model_path = "/home/node54_tmpdata/xlgeng/ckpt/qwen-7B-instruct/qwen2_7b"


def classify_weight_sharing(embed_tokens_weight: torch.Tensor,
                            lm_head_weight: torch.Tensor) -> str:
    """Compare the input-embedding weights with the LM-head weights.

    Matching shape is a necessary condition for equality, so it is checked
    first; only then are the element values compared with ``torch.equal``.
    NOTE(review): ``torch.equal`` reports value equality only — it does not
    tell you whether the two layers are actually *tied* (share storage);
    check ``data_ptr()`` if that distinction matters.

    Args:
        embed_tokens_weight: weight tensor of the token-embedding layer.
        lm_head_weight: weight tensor of the output projection (lm_head).

    Returns:
        A human-readable (Chinese) verdict string describing whether the
        two weight tensors are identical, equal-shaped-but-different, or
        shaped differently.
    """
    if embed_tokens_weight.shape != lm_head_weight.shape:
        return "embed_tokens和lm_head的参数不同，因为它们的形状不一样。"
    if torch.equal(embed_tokens_weight, lm_head_weight):
        return "embed_tokens和lm_head的参数是相同的。"
    return "虽然形状相同，但参数具体数值不完全相同。"


def main() -> None:
    """Load the checkpoint and report whether embed_tokens equals lm_head."""
    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Use the public accessors instead of model.model.embed_tokens /
    # model.lm_head — attribute paths vary across architectures, while
    # get_input_embeddings()/get_output_embeddings() are part of the
    # stable PreTrainedModel API.
    embed_tokens_weight = model.get_input_embeddings().weight.data
    lm_head_weight = model.get_output_embeddings().weight.data

    print(classify_weight_sharing(embed_tokens_weight, lm_head_weight))


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer triggers the
    # (very expensive) checkpoint load as a side effect.
    main()