# Quantize LLaMA-13B to 4-bit GPTQ (group size 128) using the C4 calibration
# set, and save the result as a safetensors checkpoint.
#
# Base model weights: https://huggingface.co/huggyllama/llama-13b
# (NB: the URL is documentation only — passing it on the command line would be
# an unexpected extra positional argument and argparse would reject it.)
#
# CUDA_VISIBLE_DEVICES=0 pins the run to the first GPU.
CUDA_VISIBLE_DEVICES=0 python llama.py ~/llama-13b c4 \
  --wbits 4 \
  --true-sequential \
  --groupsize 128 \
  --save_safetensors llama13b-4bit-128g.safetensors