Model loading failure

#1
by xldistance - opened

Traceback (most recent call last):
File "H:\Langchain-Chatchat0.3\tabbyAPI\start.py", line 295, in <module>
entrypoint(converted_args)
File "H:\Langchain-Chatchat0.3\tabbyAPI\main.py", line 164, in entrypoint
asyncio.run(entrypoint_async())
File "H:\Langchain-Chatchat0.3.glut\lib\asyncio\runners.py", line 44, in run
return loop.run_until_complete(main)
File "winloop\loop.pyx", line 1521, in winloop.loop.Loop.run_until_complete
File "H:\Langchain-Chatchat0.3\tabbyAPI\main.py", line 70, in entrypoint_async
await model.load_model(
File "H:\Langchain-Chatchat0.3\tabbyAPI\common\model.py", line 112, in load_model
async for _ in load_model_gen(model_path, **kwargs):
File "H:\Langchain-Chatchat0.3\tabbyAPI\common\model.py", line 90, in load_model_gen
async for module, modules in load_status:
File "H:\Langchain-Chatchat0.3\tabbyAPI\backends\exllamav2\model.py", line 564, in load_gen
async for value in iterate_in_threadpool(model_load_generator):
File "H:\Langchain-Chatchat0.3\tabbyAPI\common\concurrency.py", line 30, in iterate_in_threadpool
yield await asyncio.to_thread(gen_next, generator)
File "H:\Langchain-Chatchat0.3.glut\lib\asyncio\threads.py", line 25, in to_thread
return await loop.run_in_executor(None, func_call)
File "H:\Langchain-Chatchat0.3.glut\lib\concurrent\futures\thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "H:\Langchain-Chatchat0.3\tabbyAPI\common\concurrency.py", line 20, in gen_next
return next(generator)
File "H:\Langchain-Chatchat0.3.glut\lib\site-packages\torch\utils\_contextlib.py", line 36, in generator_context
response = gen.send(None)
File "H:\Langchain-Chatchat0.3\tabbyAPI\backends\exllamav2\model.py", line 606, in load_model_sync
self.tokenizer = ExLlamaV2Tokenizer(self.config)
File "H:\Langchain-Chatchat0.3.glut\lib\site-packages\exllamav2\tokenizer\tokenizer.py", line 130, in __init__
self.tokenizer_model = ExLlamaV2TokenizerHF(path_hf)
File "H:\Langchain-Chatchat0.3.glut\lib\site-packages\exllamav2\tokenizer\hf.py", line 22, in __init__
self.hf_tokenizer = Tokenizer.from_file(tokenizer_json)
Exception: data did not match any variant of untagged enum ModelWrapper at line 501272 column 3

exllamav2 0.2.7+cu121.torch2.5.0

After updating the transformers library, the model loads just fine.

pipilok changed discussion status to closed

Sign up or log in to comment