|
2024-02-26 21:25:09 | INFO | model_worker | args: Namespace(host='localhost', port=21002, worker_address='http://localhost:21002', controller_address='http://localhost:21001', model_path='MBZUAI/MobiLlama-0.5B_chat', revision='main', device='cuda', gpus=None, num_gpus=1, max_gpu_memory=None, dtype=None, load_8bit=False, cpu_offloading=False, gptq_ckpt=None, gptq_wbits=16, gptq_groupsize=-1, gptq_act_order=False, awq_ckpt=None, awq_wbits=16, awq_groupsize=-1, enable_exllama=False, exllama_max_seq_len=4096, exllama_gpu_split=None, exllama_cache_8bit=False, enable_xft=False, xft_max_seq_len=4096, xft_dtype=None, model_names=None, conv_template=None, embed_in_truncate=False, limit_worker_concurrency=5, stream_interval=2, no_register=False, seed=None, debug=False, ssl=False)
2024-02-26 21:25:09 | INFO | model_worker | Loading the model ['MobiLlama-0.5B_chat'] on worker f5709397 ...
2024-02-26 21:25:09 | ERROR | stderr | Traceback (most recent call last):
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 270, in hf_raise_for_status
2024-02-26 21:25:09 | ERROR | stderr |     response.raise_for_status()
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/requests/models.py", line 1021, in raise_for_status
2024-02-26 21:25:09 | ERROR | stderr |     raise HTTPError(http_error_msg, response=self)
2024-02-26 21:25:09 | ERROR | stderr | requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/MBZUAI/MobiLlama-0.5B_chat/resolve/main/tokenizer_config.json
2024-02-26 21:25:09 | ERROR | stderr |
2024-02-26 21:25:09 | ERROR | stderr | The above exception was the direct cause of the following exception:
2024-02-26 21:25:09 | ERROR | stderr |
2024-02-26 21:25:09 | ERROR | stderr | Traceback (most recent call last):
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/transformers/utils/hub.py", line 429, in cached_file
2024-02-26 21:25:09 | ERROR | stderr |     resolved_file = hf_hub_download(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 118, in _inner_fn
2024-02-26 21:25:09 | ERROR | stderr |     return fn(*args, **kwargs)
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 1374, in hf_hub_download
2024-02-26 21:25:09 | ERROR | stderr |     raise head_call_error
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 1247, in hf_hub_download
2024-02-26 21:25:09 | ERROR | stderr |     metadata = get_hf_file_metadata(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 118, in _inner_fn
2024-02-26 21:25:09 | ERROR | stderr |     return fn(*args, **kwargs)
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 1624, in get_hf_file_metadata
2024-02-26 21:25:09 | ERROR | stderr |     r = _request_wrapper(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 402, in _request_wrapper
2024-02-26 21:25:09 | ERROR | stderr |     response = _request_wrapper(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/file_download.py", line 426, in _request_wrapper
2024-02-26 21:25:09 | ERROR | stderr |     hf_raise_for_status(response)
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 320, in hf_raise_for_status
2024-02-26 21:25:09 | ERROR | stderr |     raise RepositoryNotFoundError(message, response) from e
2024-02-26 21:25:09 | ERROR | stderr | huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-65dcf3a5-07352a30663ed69061780198;566cf227-6e78-441a-8e00-afe54bec8bf9)
2024-02-26 21:25:09 | ERROR | stderr |
2024-02-26 21:25:09 | ERROR | stderr | Repository Not Found for url: https://huggingface.co/MBZUAI/MobiLlama-0.5B_chat/resolve/main/tokenizer_config.json.
2024-02-26 21:25:09 | ERROR | stderr | Please make sure you specified the correct `repo_id` and `repo_type`.
2024-02-26 21:25:09 | ERROR | stderr | If you are trying to access a private or gated repo, make sure you are authenticated.
2024-02-26 21:25:09 | ERROR | stderr |
2024-02-26 21:25:09 | ERROR | stderr | The above exception was the direct cause of the following exception:
2024-02-26 21:25:09 | ERROR | stderr |
2024-02-26 21:25:09 | ERROR | stderr | Traceback (most recent call last):
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/runpy.py", line 196, in _run_module_as_main
2024-02-26 21:25:09 | ERROR | stderr |     return _run_code(code, main_globals, None,
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/runpy.py", line 86, in _run_code
2024-02-26 21:25:09 | ERROR | stderr |     exec(code, run_globals)
2024-02-26 21:25:09 | ERROR | stderr |   File "/mnt/beegfs/fahad.khan/mobile_check/FastChat/fastchat/serve/model_worker.py", line 414, in <module>
2024-02-26 21:25:09 | ERROR | stderr |     args, worker = create_model_worker()
2024-02-26 21:25:09 | ERROR | stderr |   File "/mnt/beegfs/fahad.khan/mobile_check/FastChat/fastchat/serve/model_worker.py", line 385, in create_model_worker
2024-02-26 21:25:09 | ERROR | stderr |     worker = ModelWorker(
2024-02-26 21:25:09 | ERROR | stderr |   File "/mnt/beegfs/fahad.khan/mobile_check/FastChat/fastchat/serve/model_worker.py", line 77, in __init__
2024-02-26 21:25:09 | ERROR | stderr |     self.model, self.tokenizer = load_model(
2024-02-26 21:25:09 | ERROR | stderr |   File "/mnt/beegfs/fahad.khan/mobile_check/FastChat/fastchat/model/model_adapter.py", line 350, in load_model
2024-02-26 21:25:09 | ERROR | stderr |     model, tokenizer = adapter.load_model(model_path, kwargs)
2024-02-26 21:25:09 | ERROR | stderr |   File "/mnt/beegfs/fahad.khan/mobile_check/FastChat/fastchat/model/model_adapter.py", line 85, in load_model
2024-02-26 21:25:09 | ERROR | stderr |     tokenizer = AutoTokenizer.from_pretrained(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 701, in from_pretrained
2024-02-26 21:25:09 | ERROR | stderr |     tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 534, in get_tokenizer_config
2024-02-26 21:25:09 | ERROR | stderr |     resolved_config_file = cached_file(
2024-02-26 21:25:09 | ERROR | stderr |   File "/home/ashmal.vayani/anaconda3/envs/finetune_amber/lib/python3.10/site-packages/transformers/utils/hub.py", line 450, in cached_file
2024-02-26 21:25:09 | ERROR | stderr |     raise EnvironmentError(
2024-02-26 21:25:09 | ERROR | stderr | OSError: MBZUAI/MobiLlama-0.5B_chat is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'
2024-02-26 21:25:09 | ERROR | stderr | If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`
|
|
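The worker fails before any weights are touched: `AutoTokenizer.from_pretrained` first requests `tokenizer_config.json` from the Hub, and the Hub returns 404 because no repository named `MBZUAI/MobiLlama-0.5B_chat` is visible to the request. A minimal sketch to check the repo id outside FastChat (the id is copied from the log above; `HF_TOKEN` is a hypothetical environment variable you would only need for a private or gated repo):

```python
import os

from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError

repo_id = "MBZUAI/MobiLlama-0.5B_chat"  # id taken from the worker log
token = os.environ.get("HF_TOKEN")      # only needed for private/gated repos

api = HfApi()
try:
    # Lightweight metadata call: succeeds only if this repo id resolves
    # for the (possibly anonymous) identity making the request.
    info = api.model_info(repo_id, token=token)
    print("Repo found:", info.id)
except RepositoryNotFoundError:
    # Same 404 the worker hit: either the id is misspelled or renamed,
    # or the repo is private and the token lacks access.
    print(f"'{repo_id}' is not visible: check the spelling on "
          "https://huggingface.co/models or authenticate first.")
```

If the check fails, the likely causes are a misspelled or renamed repo id, or missing authentication. Once a valid id (or a local folder containing a downloaded copy of the model) is known, passing it via `--model-path` to `python -m fastchat.serve.model_worker` should resolve the error, since a local path skips the Hub lookup entirely.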