diff --git "a/logs.txt" "b/logs.txt"
new file mode 100644
--- /dev/null
+++ "b/logs.txt"
@@ -0,0 +1,390 @@
+mlc_chat gen_mlc_chat_config --model /models/Mistral-7B-Instruct-v0.1 --quantization q3f16_1 --conv-template mistral_default --context-window-size 4096 --output /tmp/tmpkq3lw6xj
+/home/hongyi/.local/bin/mlc_chat:6: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
+ from pkg_resources import load_entry_point
+[2023-11-28 20:08:19] INFO auto_config.py:62: Found model configuration: /models/Mistral-7B-Instruct-v0.1/config.json
+[2023-11-28 20:08:19] INFO auto_config.py:99: Found model type: mistral. Use `--model-type` to override.
+[2023-11-28 20:08:19] INFO mistral_model.py:43: context_window_size not found in config.json. Falling back to max_position_embeddings (32768)
+[2023-11-28 20:08:19] INFO mistral_model.py:72: Using sliding window attention, setting context_window_size to -1
+[2023-11-28 20:08:19] INFO flags_model_config_override.py:24: Overriding context_window_size from -1 to 4096
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:77: [config.json] Setting bos_token_id: 1
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:77: [config.json] Setting eos_token_id: 2
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:81: Found generation_config.json: /models/Mistral-7B-Instruct-v0.1/generation_config.json
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:97: Found tokenizer config: /models/Mistral-7B-Instruct-v0.1/tokenizer.model. Copying to /tmp/tmpkq3lw6xj/tokenizer.model
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:97: Found tokenizer config: /models/Mistral-7B-Instruct-v0.1/tokenizer.json. Copying to /tmp/tmpkq3lw6xj/tokenizer.json
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:99: Not found tokenizer config: /models/Mistral-7B-Instruct-v0.1/vocab.json
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:99: Not found tokenizer config: /models/Mistral-7B-Instruct-v0.1/merges.txt
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:99: Not found tokenizer config: /models/Mistral-7B-Instruct-v0.1/added_tokens.json
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:97: Found tokenizer config: /models/Mistral-7B-Instruct-v0.1/tokenizer_config.json. Copying to /tmp/tmpkq3lw6xj/tokenizer_config.json
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting pad_token_id: 0
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting temperature: 0.7
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting repetition_penalty: 1.0
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting top_p: 0.95
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting mean_gen_len: 128
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting max_gen_len: 512
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:104: [System default] Setting shift_fill_factor: 0.3
+[2023-11-28 20:08:19] INFO gen_mlc_chat_config.py:109: Dumping configuration file to: /tmp/tmpkq3lw6xj/mlc-chat-config.json
+mlc_chat convert_weight --model /models/Mistral-7B-Instruct-v0.1 --quantization q3f16_1 --source-format auto --output /tmp/tmpkq3lw6xj
+/home/hongyi/.local/bin/mlc_chat:6: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
+ from pkg_resources import load_entry_point
+[2023-11-28 20:08:20] INFO auto_config.py:62: Found model configuration: /models/Mistral-7B-Instruct-v0.1/config.json
+[2023-11-28 20:08:20] INFO auto_device.py:37: Using device: cuda:0. Use `--device` to override.
+[2023-11-28 20:08:20] INFO auto_weight.py:70: Finding weights in: /models/Mistral-7B-Instruct-v0.1
+[2023-11-28 20:08:20] INFO auto_weight.py:120: Found source weight format: huggingface-torch. Source configuration: /models/Mistral-7B-Instruct-v0.1/pytorch_model.bin.index.json
+[2023-11-28 20:08:20] INFO auto_weight.py:149: Not found Huggingface Safetensor
+[2023-11-28 20:08:20] INFO auto_weight.py:106: Using source weight configuration: /models/Mistral-7B-Instruct-v0.1/pytorch_model.bin.index.json. Use `--source` to override.
+[2023-11-28 20:08:20] INFO auto_weight.py:110: Using source weight format: huggingface-torch. Use `--source-format` to override.
+[2023-11-28 20:08:20] INFO auto_config.py:99: Found model type: mistral. Use `--model-type` to override.
+[2023-11-28 20:08:20] INFO mistral_model.py:43: context_window_size not found in config.json. Falling back to max_position_embeddings (32768)
+[2023-11-28 20:08:20] INFO mistral_model.py:72: Using sliding window attention, setting context_window_size to -1
+Weight conversion with arguments:
+ --config /models/Mistral-7B-Instruct-v0.1/config.json
+ --quantization GroupQuantize(name='q3f16_1', kind='group-quant', group_size=40, quantize_dtype='int3', storage_dtype='uint32', model_dtype='float16', num_elem_per_storage=10, num_storage_per_group=4, max_int_value=3)
+ --model-type mistral
+ --device cuda:0
+ --source /models/Mistral-7B-Instruct-v0.1/pytorch_model.bin.index.json
+ --source-format huggingface-torch
+ --output /tmp/tmpkq3lw6xj
+ 0%| | 0/195 [00:00