n1ck-guo committed
Commit e177848
1 Parent(s): 46b72e8

Add ModelCloud/Qwen2.5-Coder-32B-Instruct-gptqmodel-4bit-vortex-v1 to eval queue

ModelCloud/Qwen2.5-Coder-32B-Instruct-gptqmodel-4bit-vortex-v1_eval_request_False_GPTQ_4bit_int4_bfloat16.json ADDED
@@ -0,0 +1 @@
+ {"model": "ModelCloud/Qwen2.5-Coder-32B-Instruct-gptqmodel-4bit-vortex-v1", "revision": "main", "private": false, "params": 21.17, "architectures": "Qwen2ForCausalLM", "quant_type": "GPTQ", "precision": "4bit", "model_params": 32.21, "model_size": 21.17, "weight_dtype": "int4", "compute_dtype": "bfloat16", "gguf_ftype": "*Q4_0.gguf", "hardware": "gpu", "status": "Pending", "submitted_time": "2024-12-03T01:41:25Z", "model_type": "quantization", "job_id": -1, "job_start_time": null, "scripts": "ITREX"}