AttributeError: 'XgenTokenizer' object has no attribute 'encoder'

#15
by StevenLyp - opened

class XGen(LLM):
    """LangChain ``LLM`` wrapper around a locally loaded XGen model.

    The model and tokenizer are loaded explicitly via :meth:`load_model`
    (checkpoint loading is slow) rather than in the constructor, so a bare
    ``XGen()`` is cheap to build.
    """

    max_token: int = 8192       # passed to generate() as max_new_tokens
    temperature: float = 0.8
    # Annotated so pydantic (LangChain's LLM base class) registers it as a
    # model field — a bare ``top_p = 0.9`` would be a plain class attribute.
    top_p: float = 0.9
    tokenizer: object = None    # set by load_model()
    model: object = None        # set by load_model()

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM type."""
        return "XGen"

    def load_model(self, llm_device: str = "gpu", model_name_or_path: Optional[str] = None) -> None:
        """Load model and tokenizer from ``model_name_or_path``.

        NOTE(review): ``llm_device`` is currently ignored — the device is
        hard-coded to 'cuda' (selects the GPU used for training).
        """
        # ``load_model`` here resolves to the module-level FastChat helper,
        # not this method: method names do not shadow globals inside a body.
        self.model, self.tokenizer = load_model(model_name_or_path, 'cuda', 1)
        print('xgen model load finished')

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Generate a completion for ``prompt`` and return its first line.

        ``stop`` is accepted for LangChain interface compatibility but is
        not forwarded to ``generate``.
        """
        input_ids = self.tokenizer([prompt]).input_ids
        output_ids = self.model.generate(
            torch.as_tensor(input_ids).to('cuda'),
            temperature=self.temperature,
            max_new_tokens=self.max_token,
        )
        # Drop the echoed prompt tokens; keep only the newly generated part.
        output_ids = output_ids[0][len(input_ids[0]):]
        response = self.tokenizer.decode(
            output_ids, skip_special_tokens=True, spaces_between_special_tokens=False
        )
        # Strip backslashes and return only the first generated line.
        return response.replace('\\', '').split('\n')[0]

# Pin the visible GPU before any CUDA initialization. use_gpu(0.1) is a
# project helper — presumably it selects a GPU with enough free memory and
# returns its index as a string (TODO confirm against its definition).
os.environ["CUDA_VISIBLE_DEVICES"] = use_gpu(0.1)
import logging  # NOTE(review): unused in the visible snippet

# Local path to the XGen 7B 8k instruct checkpoint.
model_path = r"/data/package/lyp/model/xgen_7b_8k_inst"
llm = XGen()
llm.load_model(model_name_or_path = model_path)

Error mssage๏ผš
Loading checkpoint shards: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 3/3 [00:08<00:00, 2.88s/it]
Traceback (most recent call last):
File "/data/package/lyp/model/xgen_7b_8k_inst/test.py", line 74, in
llm.load_model(model_name_or_path = model_path)
File "/data/package/lyp/model/xgen_7b_8k_inst/test.py", line 52, in load_model
self.model, self.tokenizer = load_model(model_name_or_path, 'cuda', 1) # ้€‰ๆ‹ฉไฝฟ็”จ่ฎญ็ปƒ็š„GPU)
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/fastchat/model/model_adapter.py", line 288, in load_model
model, tokenizer = adapter.load_model(model_path, kwargs)
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/fastchat/model/model_adapter.py", line 1201, in load_model
tokenizer = AutoTokenizer.from_pretrained(
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/transformers/models/auto/tokenization_auto.py", line 738, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 2045, in from_pretrained
return cls._from_pretrained(
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 2256, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/xgen_7b_8k_inst/tokenization_xgen.py", line 137, in init
super().init(
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/transformers/tokenization_utils.py", line 366, in init
self._add_tokens(self.all_special_tokens_extended, special_tokens=True)
File "/root/anaconda3/envs/fastchat/lib/python3.9/site-packages/transformers/tokenization_utils.py", line 462, in _add_tokens
current_vocab = self.get_vocab().copy()
File "/root/.cache/huggingface/modules/transformers_modules/xgen_7b_8k_inst/tokenization_xgen.py", line 154, in get_vocab
vocab = {self.encoder.decode_single_token_bytes(i): i for i in range(self.vocab_size)}
File "/root/.cache/huggingface/modules/transformers_modules/xgen_7b_8k_inst/tokenization_xgen.py", line 150, in vocab_size
return self.encoder.n_vocab
AttributeError: 'XgenTokenizer' object has no attribute 'encoder'

Hi @StevenLyp, please clear the Hugging Face modules cache (the `~/.cache/huggingface/modules/transformers_modules` directory shown in your traceback, which holds a stale `tokenization_xgen.py`) and retry.

Sign up or log in to comment