danielhanchen committed
Commit 378a080
1 parent: b5f28ab

Upload GemmaForCausalLM

Files changed (1):
1. config.json +1 -1
config.json CHANGED
@@ -17,7 +17,7 @@
   "num_attention_heads": 8,
   "num_hidden_layers": 18,
   "num_key_value_heads": 1,
-  "pad_token_id": 3,
+  "pad_token_id": 0,
   "quantization_config": {
     "_load_in_4bit": true,
     "_load_in_8bit": false,