benschlagman committed
Commit 91c9083
1 Parent(s): 86527f1

Upload LlamaForCausalLM

Files changed (3):
  1. README.md +3 -1
  2. config.json +16 -2
  3. model.safetensors +3 -0
README.md CHANGED
@@ -1,6 +1,8 @@
 ---
 library_name: transformers
-tags: []
+tags:
+- trl
+- sft
 ---
 
 # Model Card for Model ID
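The README diff adds the `trl` and `sft` tags, which is how TRL-trained checkpoints are typically labeled when pushed to the Hub. As a rough illustration only (the base model and dataset below are placeholders, not taken from this commit), a minimal SFT sketch whose `push_to_hub` would write these tags into the model card:

```python
# Hedged sketch: a TRL SFT run that, on push, tags the model card with
# "trl" and "sft". Model and dataset ids below are placeholders.
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer

dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")  # placeholder dataset

trainer = SFTTrainer(
    model="meta-llama/Llama-2-7b-hf",   # placeholder base checkpoint
    train_dataset=dataset,
    dataset_text_field="text",          # the dataset's raw-text column
    args=TrainingArguments(output_dir="llama-sft", push_to_hub=True),
)
trainer.train()
trainer.push_to_hub()  # uploads the weights and tags the model card
```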
config.json CHANGED
@@ -17,12 +17,26 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 32,
   "pretraining_tp": 1,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "float16",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": false,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float16",
+  "torch_dtype": "float32",
   "transformers_version": "4.38.2",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32000
 }
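The new `quantization_config` block records a bitsandbytes 4-bit NF4 setup with fp16 compute. As a rough sketch (not the exact command behind this commit; the checkpoint and repo ids are placeholders), a block like this is what `transformers` serializes when a model is loaded with a matching `BitsAndBytesConfig` and then pushed:

```python
# Hedged sketch: loading Llama with a bitsandbytes NF4 config that serializes
# into a quantization_config block like the one above. Repo ids are placeholders.
import torch
from transformers import BitsAndBytesConfig, LlamaForCausalLM

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # -> "load_in_4bit": true
    bnb_4bit_quant_type="nf4",             # -> "bnb_4bit_quant_type": "nf4"
    bnb_4bit_compute_dtype=torch.float16,  # -> "bnb_4bit_compute_dtype": "float16"
    bnb_4bit_use_double_quant=False,
)

model = LlamaForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # placeholder base checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)

# Pushing writes model.safetensors plus a config.json that embeds the
# quantization_config shown in this diff.
model.push_to_hub("your-username/llama-2-7b-nf4")  # placeholder repo id
```

Note that `use_cache` is often set to `false` during training (for example when gradient checkpointing is enabled), which would explain the flipped value in the saved config.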
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9747feb8072382415ffb28c7e282b6b7803a79de1a4ca8071cd486fbfc34e30
+size 4826780904
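The three lines above are a Git LFS pointer: the actual ~4.8 GB weights file lives in LFS storage, and the pointer records only its SHA-256 and byte size. A minimal sketch for fetching the file and checking it against the pointer's `oid` (the repo id is a placeholder, since the commit view does not show it):

```python
# Hedged sketch: download the uploaded weights and verify their SHA-256
# against the LFS pointer above. The repo id is a placeholder.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("your-username/llama-2-7b-nf4", "model.safetensors")  # placeholder repo id

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Expected: b9747feb8072382415ffb28c7e282b6b7803a79de1a4ca8071cd486fbfc34e30
print(digest.hexdigest())
```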