lebe1 committed
Commit
e56d53e
1 Parent(s): 750949f

AutoGPTQ model for facebook/opt-125m: 8bits, gr128, desc_act=False

Files changed (3)
  1. config.json +28 -1
  2. generation_config.json +7 -0
  3. model.safetensors +3 -0
config.json CHANGED
@@ -23,8 +23,35 @@
   "num_hidden_layers": 12,
   "pad_token_id": 1,
   "prefix": "</s>",
+  "quantization_config": {
+    "batch_size": 1,
+    "bits": 2,
+    "block_name_to_quantize": "model.decoder.layers",
+    "cache_block_outputs": true,
+    "damp_percent": 0.1,
+    "dataset": "c4",
+    "desc_act": false,
+    "disable_exllama": false,
+    "exllama_config": {
+      "version": 2
+    },
+    "exllama_version": 2,
+    "group_size": 128,
+    "max_input_length": null,
+    "model_seqlen": 2048,
+    "module_name_preceding_first_block": [
+      "model.decoder.embed_tokens",
+      "model.decoder.embed_positions",
+      "model.decoder.final_layer_norm"
+    ],
+    "pad_token_id": null,
+    "quant_method": "gptq",
+    "sym": true,
+    "true_sequential": true,
+    "use_cuda_fp16": true
+  },
   "torch_dtype": "float16",
-  "transformers_version": "4.36.0.dev0",
+  "transformers_version": "4.35.2",
   "use_cache": true,
   "vocab_size": 50272,
   "word_embed_proj_dim": 768
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "eos_token_id": 2,
+  "pad_token_id": 1,
+  "transformers_version": "4.35.2"
+}
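
The new generation_config.json holds the default special-token ids that generate() falls back on. A minimal sketch of loading the quantized checkpoint and generating with those defaults; the repo id is a placeholder, not part of this commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# "<user>/opt-125m-gptq" is a placeholder; substitute the actual repo id.
repo_id = "<user>/opt-125m-gptq"

# The quantization_config in config.json is enough for from_pretrained()
# to rebuild the GPTQ model; generation_config.json supplies the
# bos/eos/pad token ids (2, 2, 1) used by generate().
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(repo_id)

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```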
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:044bae54d8c04c7672f7d2d529e50c2211ad2776f41b697fa686fd9fec0fe2fd
+size 103713936
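
model.safetensors is committed as a Git LFS pointer: the three lines above are the pointer file itself, and the actual weights are content-addressed by the sha256 oid. A small sketch for checking a downloaded copy against the pointer:

```python
import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_OID = "044bae54d8c04c7672f7d2d529e50c2211ad2776f41b697fa686fd9fec0fe2fd"
EXPECTED_SIZE = 103713936

path = "model.safetensors"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pointer verified")
```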