mzbac committed on
Commit
dd14e04
1 Parent(s): 7155bdf

Upload 8 files

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./merged_models_final",
+  "_name_or_path": "CodeLlama-34b-guanaco-gptq",
   "architectures": [
     "LlamaForCausalLM"
   ],
gptq_model-4bit-128g.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2b070900e3bd7ac02c4601647f9992fb00d1d243136708a799563551ca70c39
+oid sha256:ce426912bf94b25e8712e1fdaef44fc7d2907cb6e9340ec410fcd7cb88ba364a
 size 18329065000
quantize_config.json CHANGED
@@ -2,7 +2,7 @@
   "bits": 4,
   "group_size": 128,
   "damp_percent": 0.01,
-  "desc_act": false,
+  "desc_act": true,
   "static_groups": false,
   "sym": true,
   "true_sequential": true,