Anthonyg5005 committed
Commit: 559fb2e
Parent(s): 04c585a
add fast safetensors arg
auto-exl2-upload/exl2-quant.py
CHANGED
@@ -196,7 +196,7 @@ for bpw in bpwvalue:
     os.makedirs(f"{model}-exl2-{bpw}bpw", exist_ok=True) #create compile full directory
     subprocess.run(f"{oscp} models{slsh}{model}{slsh}config.json {model}-exl2-{bpw}bpw-WD", shell=True) #copy config to working directory
     #more settings exist in the convert.py script, to veiw them go to docs/convert.md or https://github.com/turboderp/exllamav2/blob/master/doc/convert.md
-    result = subprocess.run(f"{pyt} exllamav2/convert.py -i models/{model} -o {model}-exl2-{bpw}bpw-WD -cf {model}-exl2-{bpw}bpw -b {bpw}{mskip} -hb 8", shell=True) #run quantization and exit if failed (Credit to turbo for his dedication to exl2)
+    result = subprocess.run(f"{pyt} exllamav2/convert.py -i models/{model} -o {model}-exl2-{bpw}bpw-WD -cf {model}-exl2-{bpw}bpw -b {bpw}{mskip} -hb 8 -fst", shell=True) #run quantization and exit if failed (Credit to turbo for his dedication to exl2)
     if result.returncode != 0:
         print("Quantization failed.")
         sys.exit("Exiting...")
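For context, the only change in this commit is appending -fst, the fast safetensors argument named in the commit message, to the exllamav2 convert.py invocation. The following is a minimal sketch of that call pattern, with hypothetical stand-in values for variables the full exl2-quant.py script defines elsewhere (pyt, model, bpw, and mskip here are assumptions for illustration only):

import subprocess
import sys

# Hypothetical stand-ins for variables set earlier in exl2-quant.py (assumed values, illustration only)
pyt = "python"     # interpreter command the script selected
model = "MyModel"  # model folder name under models/
bpw = 4.0          # target bits per weight
mskip = ""         # optional extra convert.py arguments the script may append

# Same call pattern as the new line in the diff: quantize into a working directory,
# compile the final files, use an 8-bit head (-hb 8), and pass the fast safetensors
# argument (-fst) this commit adds; exit if convert.py reports failure.
result = subprocess.run(
    f"{pyt} exllamav2/convert.py -i models/{model} -o {model}-exl2-{bpw}bpw-WD "
    f"-cf {model}-exl2-{bpw}bpw -b {bpw}{mskip} -hb 8 -fst",
    shell=True,
)
if result.returncode != 0:
    print("Quantization failed.")
    sys.exit("Exiting...")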