TypeError: not a string when loading a model with the transformers library
from transformers import AutoTokenizer

model_path = "./models/baichuan-7B"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
This fails with TypeError: not a string:
Traceback (most recent call last):
  File "/AI/models/baichuan-7B/test.py", line 7, in <module>
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True , local_files_only=True )
  File "/AI/anaconda3/envs/baichuan/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 678, in from_pretrained
    return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
  File "/AI/anaconda3/envs/baichuan/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1825, in from_pretrained
    return cls._from_pretrained(
  File "/AI/anaconda3/envs/baichuan/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1988, in _from_pretrained
    tokenizer = cls(*init_inputs, **init_kwargs)
  File "/home/appuser/.cache/huggingface/modules/transformers_modules/baichuan-7B/tokenization_baichuan.py", line 89, in __init__
    self.sp_model.Load(vocab_file)
  File "/AI/anaconda3/envs/baichuan/lib/python3.10/site-packages/sentencepiece/__init__.py", line 905, in Load
    return self.LoadFromFile(model_file)
  File "/AI/anaconda3/envs/baichuan/lib/python3.10/site-packages/sentencepiece/__init__.py", line 310, in LoadFromFile
    return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
TypeError: not a string
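The exception comes from sentencepiece rather than from transformers itself: SentencePieceProcessor_LoadFromFile raises TypeError: not a string when it receives something other than a path string, typically None. In tokenization_baichuan.py that means vocab_file was never resolved, which usually indicates that no tokenizer.model file was found under model_path. A quick way to check (a minimal sketch; adjust model_path to your local directory):

import os

model_path = "./models/baichuan-7B"  # adjust to your local checkout
vocab_file = os.path.join(model_path, "tokenizer.model")  # the sentencepiece vocab shipped with baichuan-7B

print(os.path.isdir(model_path))    # the directory itself must exist
print(os.path.isfile(vocab_file))   # False here reproduces the "not a string" error
print(os.listdir(model_path))       # verify which files were actually downloaded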
Full test code:
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
import torch

#model_path = './models/baichuan-7B'
model_path = "/AI/models/baichuan-7B/models/baichuan-7B"
print(type(model_path))
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, local_files_only=True)
tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id  # default the pad token to id 0 if unset
if tokenizer.pad_token_id == 64000:
    tokenizer.pad_token_id = 0  # for baichuan model (need fix)
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
print(">>> load model...")
model = AutoModelForCausalLM.from_pretrained(model_path, config=config, torch_dtype=torch.float16, trust_remote_code=True, device_map="auto", low_cpu_mem_usage=True)
print(">>> start pred...")
for i in ["写一首春天的诗歌:", '登鹳雀楼->王之涣\n夜雨寄北->']:
    inputs = tokenizer(i, return_tensors='pt')
    inputs = inputs.to('cuda:0')
    print(f"start to pred: {i}")
    pred = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
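To isolate whether the problem is the file itself or the way transformers resolves the path, the sentencepiece model can also be loaded directly, bypassing transformers entirely (a minimal sketch; the tokenizer.model path below is an assumption about the local layout):

import os
import sentencepiece as spm

# Hypothetical path: point this at wherever tokenizer.model actually lives.
vocab_file = "/AI/models/baichuan-7B/models/baichuan-7B/tokenizer.model"
print(os.path.isfile(vocab_file))   # must be True, otherwise Load() cannot succeed

sp = spm.SentencePieceProcessor()
sp.Load(vocab_file)                 # passing None here gives the same "not a string" error
print(sp.GetPieceSize())            # e.g. 64000 for baichuan-7B
print(sp.EncodeAsPieces("登鹳雀楼"))  # sanity-check tokenization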
Same issue here. Have you solved it yet? I'm on Windows 10; could this be a Windows problem?
I'm on Linux (CentOS 7.4) and it's still not solved.