load failed

#3
by qgai - opened

Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/nvcntse/.local/lib/python3.8/site-packages/transformers/modeling_utils.py", line 442, in load_state_dict
return torch.load(checkpoint_file, map_location="cpu")
File "/home/nvcntse/.local/lib/python3.8/site-packages/torch/serialization.py", line 797, in load
with _open_zipfile_reader(opened_file) as opened_zipfile:
File "/home/nvcntse/.local/lib/python3.8/site-packages/torch/serialization.py", line 283, in __init__
super().__init__(torch._C.PyTorchFileReader(name_or_buffer))
RuntimeError: PytorchStreamReader failed reading zip archive: failed finding central directory

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "/home/nvcntse/.local/lib/python3.8/site-packages/transformers/modeling_utils.py", line 446, in load_state_dict
if f.read(7) == "version":
File "/usr/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 128: invalid start byte

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "build.py", line 376, in <module>
build(0, args)
File "build.py", line 342, in build
engine = build_or_refit_rank_engine(builder, builder_config,
File "build.py", line 239, in build_or_refit_rank_engine
hf_llama = LlamaForCausalLM.from_pretrained(
File "/home/nvcntse/.local/lib/python3.8/site-packages/transformers/modeling_utils.py", line 2795, in from_pretrained
) = cls._load_pretrained_model(
File "/home/nvcntse/.local/lib/python3.8/site-packages/transformers/modeling_utils.py", line 3109, in _load_pretrained_model
state_dict = load_state_dict(shard_file)
File "/home/nvcntse/.local/lib/python3.8/site-packages/transformers/modeling_utils.py", line 458, in load_state_dict
raise OSError(
OSError: Unable to load weights from pytorch checkpoint file for '/home/nvcntse/qgai/p4_home_qgai/llama/CodeUp-alpha-Llama-2-13b-chat-hf/pytorch_model-00001-of-00003.bin' at '/home/nvcntse/qgai/p4_home_qgai/llama/CodeUp-alpha-Llama-2-13b-chat-hf/pytorch_model-00001-of-00003.bin'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.

Sign up or log in to comment