# Load model directly
from transformers import AutoModelForCausalLM,AutoTokenizer
from torch import nn
# Checkpoint path used for BOTH tokenizer and model — kept in one constant so
# the two loads can never drift to different checkpoints.
MODEL_PATH = '/data/lxy/baichuan2-chat-7b'

# trust_remote_code=True is required because Baichuan2 ships custom modeling
# code inside the checkpoint repo. NOTE(review): this executes code from the
# checkpoint directory — acceptable only because it is a trusted local path.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
# .half() casts the weights to fp16 to halve memory before the later .cuda().
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, trust_remote_code=True).half()

# print(model)
# NOTE(review): commented-out draft of a multi-layer projector class, kept for
# reference only. It is incomplete — `nn.Linear(1024,)` is missing its
# out_features argument — so do not uncomment as-is.
# class projecter(nn.Module):
#     def __init__(self, *args, **kwargs) -> None:
#         super().__init__(*args, **kwargs)
#         self.layers = nn.ModuleList([nn.Linear(1024,) for _ in range(32)])
def a(*args, **kwargs):
    """Debug forward-pre-hook: dump every positional and keyword argument
    the hook machinery passes in, then a fixed marker line."""
    for label, payload in (('args', args), ('kwargs', kwargs)):
        print(label, payload)
    print('hello')

# NOTE(review): projecter is defined but never applied anywhere in this script —
# presumably intended to map 1024-dim features into the model's 4096-dim space.
projecter = nn.Linear(1024, 4096)

# Attach the debug hook to the model instance itself so it fires before each
# top-level forward. BUGFIX: nn.Module has no instance method
# `register_module_forward_pre_hook` — that name is a module-level function in
# torch.nn.modules.module for *global* hooks — so the original call raised
# AttributeError; `register_forward_pre_hook` is the correct instance API
# (same one used on a submodule in the commented line above).
model.register_forward_pre_hook(a)

prompt = "你好"
# Tokenize first, then move both inputs and model to the GPU before generating.
inputs = tokenizer(prompt, return_tensors='pt').to('cuda')
model.cuda()
ss = model.generate(**inputs)
print(ss)
# Raw token ids are hard to read — also print the decoded continuation.
print(tokenizer.decode(ss[0], skip_special_tokens=True))
